Dataset columns (name, type/stat, range as shown in the preview):

    repo_id             int64           0 – 79
    repo_name           stringlengths   14 – 39
    project_context     stringlengths   51.2k – 203k
    file_context        list
    gt                  sequence
    metainfo_separator  stringclasses   1 value

Previewed row:

    repo_id:    52
    repo_name:  bmaltais__kohya_ss
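Since the project_context value for this row is one very long string, a minimal inspection sketch is given below before the raw dump. It assumes the row is available as a plain Python dict keyed by the columns listed above; the helper name preview_row and its max_chars parameter are illustrative only, not part of the dataset or of the kohya_ss repository.

    # Minimal sketch: cut one previewed row into segments using the row's own
    # metainfo_separator value. How file paths and file bodies line up inside the
    # segments should be confirmed against the actual data, not this preview.
    def preview_row(row: dict, max_chars: int = 80) -> None:
        separator = row["metainfo_separator"]          # "METASEP" in this preview
        segments = row["project_context"].split(separator)
        print(f"repo {row['repo_id']} ({row['repo_name']}): {len(segments)} segments")
        for segment in segments:
            # Show only the first characters of each segment.
            print(repr(segment.strip()[:max_chars]))

For the row shown here, the first segment is the repository name (bmaltais__kohya_ss) and the following segments carry the file paths (tools/prune.py, tools/convert_images_to_webp.py, and so on) together with their source. The row's project_context follows, with each file introduced by its path and a METASEP marker: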
bmaltais__kohya_ss METASEP
tools/prune.py METASEP
import argparse
import torch
from tqdm import tqdm

parser = argparse.ArgumentParser(description="Prune a model")
parser.add_argument("model_prune", type=str, help="Path to model to prune")
parser.add_argument("prune_output", type=str, help="Path to pruned ckpt output")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
args = parser.parse_args()

print("Loading model...")
model_prune = torch.load(args.model_prune)
theta_prune = model_prune["state_dict"]
theta = {}

print("Pruning model...")
for key in tqdm(theta_prune.keys(), desc="Pruning keys"):
    if "model" in key:
        theta.update({key: theta_prune[key]})
del theta_prune

if args.half:
    print("Halving model...")
    state_dict = {k: v.half() for k, v in tqdm(theta.items(), desc="Halving weights")}
else:
    state_dict = theta
del theta

print("Saving pruned model...")
torch.save({"state_dict": state_dict}, args.prune_output)
del state_dict
print("Done pruning!")

tools/convert_images_to_webp.py METASEP
import argparse
import glob
import os
from pathlib import Path
from PIL import Image


def main():
    # Define the command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("directory", type=str, help="the directory containing the images to be converted")
    parser.add_argument("--in_ext", type=str, default="webp", help="the input file extension")
    parser.add_argument("--delete_originals", action="store_true", help="whether to delete the original files after conversion")

    # Parse the command-line arguments
    args = parser.parse_args()
    directory = args.directory
    in_ext = args.in_ext
    delete_originals = args.delete_originals

    # Set the output file extension to .webp
    out_ext = "webp"

    # Create the file pattern string using the input file extension
    file_pattern = f"*.{in_ext}"

    # Get the list of files in the directory that match the file pattern
    files = glob.glob(os.path.join(directory, file_pattern))

    # Iterate over the list of files
    for file in files:
        # Open the image file
        img = Image.open(file)

        # Create a new file path with the output file extension
        new_path = Path(file).with_suffix(f".{out_ext}")
        print(new_path)

        # Check if the output file already exists
        if new_path.exists():
            # Skip the conversion if the output file already exists
            print(f"Skipping {file} because {new_path} already exists")
            continue

        # Save the image to the new file as lossless
        img.save(new_path, lossless=True)

        # Optionally, delete the original file
        if delete_originals:
            os.remove(file)


if __name__ == "__main__":
    main()

tools/convert_images_to_hq_jpg.py METASEP
import argparse
import glob
import os
from pathlib import Path
from PIL import Image


def main():
    # Define the command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("directory", type=str, help="the directory containing the images to be converted")
    parser.add_argument("--in_ext", type=str, default="webp", help="the input file extension")
    parser.add_argument("--quality", type=int, default=95, help="the JPEG quality (0-100)")
    parser.add_argument("--delete_originals", action="store_true", help="whether to delete the original files after conversion")

    # Parse the command-line arguments
    args = parser.parse_args()
    directory = args.directory
    in_ext = args.in_ext
    out_ext = "jpg"
    quality = args.quality
    delete_originals = args.delete_originals

    # Create the file pattern string using the input file extension
    file_pattern = f"*.{in_ext}"

    # Get the list of files in the directory that match the file pattern
    files =
glob.glob(os.path.join(directory, file_pattern)) # Iterate over the list of files for file in files: # Open the image file img = Image.open(file) # Create a new file path with the output file extension new_path = Path(file).with_suffix(f".{out_ext}") # Check if the output file already exists if new_path.exists(): # Skip the conversion if the output file already exists print(f"Skipping {file} because {new_path} already exists") continue # Save the image to the new file as high-quality JPEG img.save(new_path, quality=quality, optimize=True) # Optionally, delete the original file if delete_originals: os.remove(file) if __name__ == "__main__": main() tools/convert_diffusers20_original_sd.py METASEP # convert Diffusers v1.x/v2.0 model to original Stable Diffusion # v1: initial version # v2: support safetensors # v3: fix to support another format # v4: support safetensors in Diffusers import argparse import os import torch from diffusers import StableDiffusionPipeline from library import model_util as model_util def convert(args): # 引数を確認する load_dtype = torch.float16 if args.fp16 else None save_dtype = None if args.fp16: save_dtype = torch.float16 elif args.bf16: save_dtype = torch.bfloat16 elif args.float: save_dtype = torch.float is_load_ckpt = os.path.isfile(args.model_to_load) is_save_ckpt = len(os.path.splitext(args.model_to_save)[1]) > 0 assert not is_load_ckpt or args.v1 != args.v2, f"v1 or v2 is required to load checkpoint / checkpointの読み込みにはv1/v2指定が必要です" assert is_save_ckpt or args.reference_model is not None, f"reference model is required to save as Diffusers / Diffusers形式での保存には参照モデルが必要です" # モデルを読み込む msg = "checkpoint" if is_load_ckpt else ("Diffusers" + (" as fp16" if args.fp16 else "")) print(f"loading {msg}: {args.model_to_load}") if is_load_ckpt: v2_model = args.v2 text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(v2_model, args.model_to_load) else: pipe = StableDiffusionPipeline.from_pretrained(args.model_to_load, torch_dtype=load_dtype, tokenizer=None, safety_checker=None) text_encoder = pipe.text_encoder vae = pipe.vae unet = pipe.unet if args.v1 == args.v2: # 自動判定する v2_model = unet.config.cross_attention_dim == 1024 print("checking model version: model is " + ('v2' if v2_model else 'v1')) else: v2_model = args.v1 # 変換して保存する msg = ("checkpoint" + ("" if save_dtype is None else f" in {save_dtype}")) if is_save_ckpt else "Diffusers" print(f"converting and saving as {msg}: {args.model_to_save}") if is_save_ckpt: original_model = args.model_to_load if is_load_ckpt else None key_count = model_util.save_stable_diffusion_checkpoint(v2_model, args.model_to_save, text_encoder, unet, original_model, args.epoch, args.global_step, save_dtype, vae) print(f"model saved. 
total converted state_dict keys: {key_count}") else: print(f"copy scheduler/tokenizer config from: {args.reference_model}") model_util.save_diffusers_checkpoint(v2_model, args.model_to_save, text_encoder, unet, args.reference_model, vae, args.use_safetensors) print(f"model saved.") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--v1", action='store_true', help='load v1.x model (v1 or v2 is required to load checkpoint) / 1.xのモデルを読み込む') parser.add_argument("--v2", action='store_true', help='load v2.0 model (v1 or v2 is required to load checkpoint) / 2.0のモデルを読み込む') parser.add_argument("--fp16", action='store_true', help='load as fp16 (Diffusers only) and save as fp16 (checkpoint only) / fp16形式で読み込み(Diffusers形式のみ対応)、保存する(checkpointのみ対応)') parser.add_argument("--bf16", action='store_true', help='save as bf16 (checkpoint only) / bf16形式で保存する(checkpointのみ対応)') parser.add_argument("--float", action='store_true', help='save as float (checkpoint only) / float(float32)形式で保存する(checkpointのみ対応)') parser.add_argument("--epoch", type=int, default=0, help='epoch to write to checkpoint / checkpointに記録するepoch数の値') parser.add_argument("--global_step", type=int, default=0, help='global_step to write to checkpoint / checkpointに記録するglobal_stepの値') parser.add_argument("--reference_model", type=str, default=None, help="reference model for schduler/tokenizer, required in saving Diffusers, copy schduler/tokenizer from this / scheduler/tokenizerのコピー元のDiffusersモデル、Diffusers形式で保存するときに必要") parser.add_argument("--use_safetensors", action='store_true', help="use safetensors format to save Diffusers model (checkpoint depends on the file extension) / Duffusersモデルをsafetensors形式で保存する(checkpointは拡張子で自動判定)") parser.add_argument("model_to_load", type=str, default=None, help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ") parser.add_argument("model_to_save", type=str, default=None, help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存") args = parser.parse_args() convert(args) tools/caption.py METASEP # This script will create the caption text files in the specified folder using the specified file pattern and caption text. 
# # eg: python caption.py D:\some\folder\location "*.png, *.jpg, *.webp" "some caption text" import argparse # import glob # import os from pathlib import Path def create_caption_files(image_folder: str, file_pattern: str, caption_text: str, caption_file_ext: str, overwrite: bool): # Split the file patterns string and strip whitespace from each pattern patterns = [pattern.strip() for pattern in file_pattern.split(",")] # Create a Path object for the image folder folder = Path(image_folder) # Iterate over the file patterns for pattern in patterns: # Use the glob method to match the file patterns files = folder.glob(pattern) # Iterate over the matched files for file in files: # Check if a text file with the same name as the current file exists in the folder txt_file = file.with_suffix(caption_file_ext) if not txt_file.exists() or overwrite: # Create a text file with the caption text in the folder, if it does not already exist # or if the overwrite argument is True with open(txt_file, "w") as f: f.write(caption_text) def main(): # Define command-line arguments parser = argparse.ArgumentParser() parser.add_argument("image_folder", type=str, help="the folder where the image files are located") parser.add_argument("--file_pattern", type=str, default="*.png, *.jpg, *.jpeg, *.webp", help="the pattern to match the image file names") parser.add_argument("--caption_file_ext", type=str, default=".caption", help="the caption file extension.") parser.add_argument("--overwrite", action="store_true", default=False, help="whether to overwrite existing caption files") # Create a mutually exclusive group for the caption_text and caption_file arguments group = parser.add_mutually_exclusive_group() group.add_argument("--caption_text", type=str, help="the text to include in the caption files") group.add_argument("--caption_file", type=argparse.FileType("r"), help="the file containing the text to include in the caption files") # Parse the command-line arguments args = parser.parse_args() image_folder = args.image_folder file_pattern = args.file_pattern caption_file_ext = args.caption_file_ext overwrite = args.overwrite # Get the caption text from either the caption_text or caption_file argument if args.caption_text: caption_text = args.caption_text elif args.caption_file: caption_text = args.caption_file.read() # Create a Path object for the image folder folder = Path(image_folder) # Check if the image folder exists and is a directory if not folder.is_dir(): raise ValueError(f"{image_folder} is not a valid directory.") # Create the caption files create_caption_files(image_folder, file_pattern, caption_text, caption_file_ext, overwrite) if __name__ == "__main__": main() library/wd14_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess from .common_gui import get_folder_path def caption_images(train_data_dir, caption_extension, batch_size, thresh): # Check for caption_text_input # if caption_text_input == "": # msgbox("Caption text is missing...") # return # Check for images_dir_input if train_data_dir == '': msgbox('Image folder is missing...') return print(f'Captioning files in {train_data_dir}...') run_cmd = f'accelerate launch "./script/tag_images_by_wd14_tagger.py"' run_cmd += f' --batch_size="{int(batch_size)}"' run_cmd += f' --thresh="{thresh}"' if caption_extension != '': run_cmd += f' --caption_extension="{caption_extension}"' run_cmd += f' "{train_data_dir}"' print(run_cmd) # Run the command subprocess.run(run_cmd) print('...captioning done') ### # Gradio UI ### def 
gradio_wd14_caption_gui_tab(): with gr.Tab('WD14 Captioning'): gr.Markdown( 'This utility will use WD14 to caption files for each images in a folder.' ) with gr.Row(): train_data_dir = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_train_data_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_train_data_dir_input.click( get_folder_path, outputs=train_data_dir ) caption_extension = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) thresh = gr.Number(value=0.35, label='Threshold') batch_size = gr.Number( value=1, label='Batch size', interactive=True ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[train_data_dir, caption_extension, batch_size, thresh], ) library/model_util.py METASEP # v1: split from train_db_fixed.py. # v2: support safetensors import math import os import torch from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from safetensors.torch import load_file, save_file # DiffUsers版StableDiffusionのモデルパラメータ NUM_TRAIN_TIMESTEPS = 1000 BETA_START = 0.00085 BETA_END = 0.0120 UNET_PARAMS_MODEL_CHANNELS = 320 UNET_PARAMS_CHANNEL_MULT = [1, 2, 4, 4] UNET_PARAMS_ATTENTION_RESOLUTIONS = [4, 2, 1] UNET_PARAMS_IMAGE_SIZE = 32 # unused UNET_PARAMS_IN_CHANNELS = 4 UNET_PARAMS_OUT_CHANNELS = 4 UNET_PARAMS_NUM_RES_BLOCKS = 2 UNET_PARAMS_CONTEXT_DIM = 768 UNET_PARAMS_NUM_HEADS = 8 VAE_PARAMS_Z_CHANNELS = 4 VAE_PARAMS_RESOLUTION = 256 VAE_PARAMS_IN_CHANNELS = 3 VAE_PARAMS_OUT_CH = 3 VAE_PARAMS_CH = 128 VAE_PARAMS_CH_MULT = [1, 2, 4, 4] VAE_PARAMS_NUM_RES_BLOCKS = 2 # V2 V2_UNET_PARAMS_ATTENTION_HEAD_DIM = [5, 10, 20, 20] V2_UNET_PARAMS_CONTEXT_DIM = 1024 # Diffusersの設定を読み込むための参照モデル DIFFUSERS_REF_MODEL_ID_V1 = 'runwayml/stable-diffusion-v1-5' DIFFUSERS_REF_MODEL_ID_V2 = 'stabilityai/stable-diffusion-2-1' # region StableDiffusion->Diffusersの変換コード # convert_original_stable_diffusion_to_diffusers をコピーして修正している(ASL 2.0) def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return '.'.join(path.split('.')[n_shave_prefix_segments:]) else: return '.'.join(path.split('.')[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('nin_shortcut', 'conv_shortcut') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('norm.weight', 'group_norm.weight') new_item = new_item.replace('norm.bias', 'group_norm.bias') new_item = new_item.replace('q.weight', 'query.weight') new_item = new_item.replace('q.bias', 'query.bias') new_item = new_item.replace('k.weight', 'key.weight') new_item = new_item.replace('k.bias', 'key.bias') new_item = new_item.replace('v.weight', 'value.weight') new_item = new_item.replace('v.bias', 'value.bias') new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None, ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance( paths, list ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = ( (-1, channels) if len(old_tensor.shape) == 3 else (-1) ) num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3 old_tensor = old_tensor.reshape( (num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map['query']] = query.reshape(target_shape) checkpoint[path_map['key']] = key.reshape(target_shape) checkpoint[path_map['value']] = value.reshape(target_shape) for path in paths: new_path = path['new'] # These have already been assigned if ( attention_paths_to_split is not None and new_path in attention_paths_to_split ): continue # Global renaming happens here new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0') new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0') new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1') if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace( replacement['old'], replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if 'proj_attn.weight' in new_path: checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path['old']] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ['query.weight', 'key.weight', 'value.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif 'proj_attn.weight' in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def linear_transformer_to_conv(checkpoint): keys = list(checkpoint.keys()) tf_keys = ['proj_in.weight', 'proj_out.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in tf_keys: if checkpoint[key].ndim == 2: checkpoint[key] = checkpoint[key].unsqueeze(2).unsqueeze(2) def convert_ldm_unet_checkpoint(v2, checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} unet_key = 'model.diffusion_model.' keys = list(checkpoint.keys()) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, '')] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint['time_embedding.linear_1.weight'] = unet_state_dict[ 'time_embed.0.weight' ] new_checkpoint['time_embedding.linear_1.bias'] = unet_state_dict[ 'time_embed.0.bias' ] new_checkpoint['time_embedding.linear_2.weight'] = unet_state_dict[ 'time_embed.2.weight' ] new_checkpoint['time_embedding.linear_2.bias'] = unet_state_dict[ 'time_embed.2.bias' ] new_checkpoint['conv_in.weight'] = unet_state_dict[ 'input_blocks.0.0.weight' ] new_checkpoint['conv_in.bias'] = unet_state_dict['input_blocks.0.0.bias'] new_checkpoint['conv_norm_out.weight'] = unet_state_dict['out.0.weight'] new_checkpoint['conv_norm_out.bias'] = unet_state_dict['out.0.bias'] new_checkpoint['conv_out.weight'] = unet_state_dict['out.2.weight'] new_checkpoint['conv_out.bias'] = unet_state_dict['out.2.bias'] # Retrieves the keys for the input blocks only num_input_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'input_blocks' in layer } ) input_blocks = { layer_id: [ key for key in unet_state_dict if f'input_blocks.{layer_id}.' 
in key ] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'middle_block' in layer } ) middle_blocks = { layer_id: [ key for key in unet_state_dict if f'middle_block.{layer_id}.' in key ] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'output_blocks' in layer } ) output_blocks = { layer_id: [ key for key in unet_state_dict if f'output_blocks.{layer_id}.' in key ] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config['layers_per_block'] + 1) layer_in_block_id = (i - 1) % (config['layers_per_block'] + 1) resnets = [ key for key in input_blocks[i] if f'input_blocks.{i}.0' in key and f'input_blocks.{i}.0.op' not in key ] attentions = [ key for key in input_blocks[i] if f'input_blocks.{i}.1' in key ] if f'input_blocks.{i}.0.op.weight' in unet_state_dict: new_checkpoint[ f'down_blocks.{block_id}.downsamplers.0.conv.weight' ] = unet_state_dict.pop(f'input_blocks.{i}.0.op.weight') new_checkpoint[ f'down_blocks.{block_id}.downsamplers.0.conv.bias' ] = unet_state_dict.pop(f'input_blocks.{i}.0.op.bias') paths = renew_resnet_paths(resnets) meta_path = { 'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config ) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config ) attentions_paths = renew_attention_paths(attentions) meta_path = {'old': 'middle_block.1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) for i in range(num_output_blocks): block_id = i // (config['layers_per_block'] + 1) layer_in_block_id = i % (config['layers_per_block'] + 1) output_block_layers = [ shave_segments(name, 2) for name in output_blocks[i] ] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split('.')[0], shave_segments( layer, 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [ key for key in output_blocks[i] if f'output_blocks.{i}.0' in key ] attentions = [ key for key in output_blocks[i] if f'output_blocks.{i}.1' in key ] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = { 'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) # オリジナル: # if ["conv.weight", "conv.bias"] in output_block_list.values(): # index = 
list(output_block_list.values()).index(["conv.weight", "conv.bias"]) # biasとweightの順番に依存しないようにする:もっといいやり方がありそうだが for l in output_block_list.values(): l.sort() if ['conv.bias', 'conv.weight'] in output_block_list.values(): index = list(output_block_list.values()).index( ['conv.bias', 'conv.weight'] ) new_checkpoint[ f'up_blocks.{block_id}.upsamplers.0.conv.bias' ] = unet_state_dict[f'output_blocks.{i}.{index}.conv.bias'] new_checkpoint[ f'up_blocks.{block_id}.upsamplers.0.conv.weight' ] = unet_state_dict[f'output_blocks.{i}.{index}.conv.weight'] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) else: resnet_0_paths = renew_resnet_paths( output_block_layers, n_shave_prefix_segments=1 ) for path in resnet_0_paths: old_path = '.'.join(['output_blocks', str(i), path['old']]) new_path = '.'.join( [ 'up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new'], ] ) new_checkpoint[new_path] = unet_state_dict[old_path] # SDのv2では1*1のconv2dがlinearに変わっているので、linear->convに変換する if v2: linear_transformer_to_conv(new_checkpoint) return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} vae_key = 'first_stage_model.' keys = list(checkpoint.keys()) for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, '')] = checkpoint.get(key) # if len(vae_state_dict) == 0: # # 渡されたcheckpointは.ckptから読み込んだcheckpointではなくvaeのstate_dict # vae_state_dict = checkpoint new_checkpoint = {} new_checkpoint['encoder.conv_in.weight'] = vae_state_dict[ 'encoder.conv_in.weight' ] new_checkpoint['encoder.conv_in.bias'] = vae_state_dict[ 'encoder.conv_in.bias' ] new_checkpoint['encoder.conv_out.weight'] = vae_state_dict[ 'encoder.conv_out.weight' ] new_checkpoint['encoder.conv_out.bias'] = vae_state_dict[ 'encoder.conv_out.bias' ] new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict[ 'encoder.norm_out.weight' ] new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict[ 'encoder.norm_out.bias' ] new_checkpoint['decoder.conv_in.weight'] = vae_state_dict[ 'decoder.conv_in.weight' ] new_checkpoint['decoder.conv_in.bias'] = vae_state_dict[ 'decoder.conv_in.bias' ] new_checkpoint['decoder.conv_out.weight'] = vae_state_dict[ 'decoder.conv_out.weight' ] new_checkpoint['decoder.conv_out.bias'] = vae_state_dict[ 'decoder.conv_out.bias' ] new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict[ 'decoder.norm_out.weight' ] new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict[ 'decoder.norm_out.bias' ] new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight'] new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias'] new_checkpoint['post_quant_conv.weight'] = vae_state_dict[ 'post_quant_conv.weight' ] new_checkpoint['post_quant_conv.bias'] = vae_state_dict[ 'post_quant_conv.bias' ] # Retrieves the keys for the encoder down blocks only num_down_blocks = len( { '.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer } ) down_blocks = { layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len( { 
'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer } ) up_blocks = { layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [ key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key ] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: new_checkpoint[ f'encoder.down_blocks.{i}.downsamplers.0.conv.weight' ] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.weight') new_checkpoint[ f'encoder.down_blocks.{i}.downsamplers.0.conv.bias' ] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.bias') paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [ key for key in mid_resnets if f'encoder.mid.block_{i}' in key ] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_attentions = [ key for key in vae_state_dict if 'encoder.mid.attn' in key ] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key ] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: new_checkpoint[ f'decoder.up_blocks.{i}.upsamplers.0.conv.weight' ] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.weight'] new_checkpoint[ f'decoder.up_blocks.{i}.upsamplers.0.conv.bias' ] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.bias'] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [ key for key in mid_resnets if f'decoder.mid.block_{i}' in key ] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_attentions = [ key for key in vae_state_dict if 'decoder.mid.attn' in key ] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) conv_attn_to_linear(new_checkpoint) return new_checkpoint def create_unet_diffusers_config(v2): """ Creates a config for the diffusers based on the config of the LDM model. 
""" # unet_params = original_config.model.params.unet_config.params block_out_channels = [ UNET_PARAMS_MODEL_CHANNELS * mult for mult in UNET_PARAMS_CHANNEL_MULT ] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = ( 'CrossAttnDownBlock2D' if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else 'DownBlock2D' ) down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = ( 'CrossAttnUpBlock2D' if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else 'UpBlock2D' ) up_block_types.append(block_type) resolution //= 2 config = dict( sample_size=UNET_PARAMS_IMAGE_SIZE, in_channels=UNET_PARAMS_IN_CHANNELS, out_channels=UNET_PARAMS_OUT_CHANNELS, down_block_types=tuple(down_block_types), up_block_types=tuple(up_block_types), block_out_channels=tuple(block_out_channels), layers_per_block=UNET_PARAMS_NUM_RES_BLOCKS, cross_attention_dim=UNET_PARAMS_CONTEXT_DIM if not v2 else V2_UNET_PARAMS_CONTEXT_DIM, attention_head_dim=UNET_PARAMS_NUM_HEADS if not v2 else V2_UNET_PARAMS_ATTENTION_HEAD_DIM, ) return config def create_vae_diffusers_config(): """ Creates a config for the diffusers based on the config of the LDM model. """ # vae_params = original_config.model.params.first_stage_config.params.ddconfig # _ = original_config.model.params.first_stage_config.params.embed_dim block_out_channels = [VAE_PARAMS_CH * mult for mult in VAE_PARAMS_CH_MULT] down_block_types = ['DownEncoderBlock2D'] * len(block_out_channels) up_block_types = ['UpDecoderBlock2D'] * len(block_out_channels) config = dict( sample_size=VAE_PARAMS_RESOLUTION, in_channels=VAE_PARAMS_IN_CHANNELS, out_channels=VAE_PARAMS_OUT_CH, down_block_types=tuple(down_block_types), up_block_types=tuple(up_block_types), block_out_channels=tuple(block_out_channels), latent_channels=VAE_PARAMS_Z_CHANNELS, layers_per_block=VAE_PARAMS_NUM_RES_BLOCKS, ) return config def convert_ldm_clip_checkpoint_v1(checkpoint): keys = list(checkpoint.keys()) text_model_dict = {} for key in keys: if key.startswith('cond_stage_model.transformer'): text_model_dict[ key[len('cond_stage_model.transformer.') :] ] = checkpoint[key] return text_model_dict def convert_ldm_clip_checkpoint_v2(checkpoint, max_length): # 嫌になるくらい違うぞ! def convert_key(key): if not key.startswith('cond_stage_model'): return None # common conversion key = key.replace( 'cond_stage_model.model.transformer.', 'text_model.encoder.' ) key = key.replace('cond_stage_model.model.', 'text_model.') if 'resblocks' in key: # resblocks conversion key = key.replace('.resblocks.', '.layers.') if '.ln_' in key: key = key.replace('.ln_', '.layer_norm') elif '.mlp.' in key: key = key.replace('.c_fc.', '.fc1.') key = key.replace('.c_proj.', '.fc2.') elif '.attn.out_proj' in key: key = key.replace('.attn.out_proj.', '.self_attn.out_proj.') elif '.attn.in_proj' in key: key = None # 特殊なので後で処理する else: raise ValueError(f'unexpected key in SD: {key}') elif '.positional_embedding' in key: key = key.replace( '.positional_embedding', '.embeddings.position_embedding.weight', ) elif '.text_projection' in key: key = None # 使われない??? elif '.logit_scale' in key: key = None # 使われない??? elif '.token_embedding' in key: key = key.replace( '.token_embedding.weight', '.embeddings.token_embedding.weight' ) elif '.ln_final' in key: key = key.replace('.ln_final', '.final_layer_norm') return key keys = list(checkpoint.keys()) new_sd = {} for key in keys: # remove resblocks 23 if '.resblocks.23.' 
in key: continue new_key = convert_key(key) if new_key is None: continue new_sd[new_key] = checkpoint[key] # attnの変換 for key in keys: if '.resblocks.23.' in key: continue if '.resblocks' in key and '.attn.in_proj_' in key: # 三つに分割 values = torch.chunk(checkpoint[key], 3) key_suffix = '.weight' if 'weight' in key else '.bias' key_pfx = key.replace( 'cond_stage_model.model.transformer.resblocks.', 'text_model.encoder.layers.', ) key_pfx = key_pfx.replace('_weight', '') key_pfx = key_pfx.replace('_bias', '') key_pfx = key_pfx.replace('.attn.in_proj', '.self_attn.') new_sd[key_pfx + 'q_proj' + key_suffix] = values[0] new_sd[key_pfx + 'k_proj' + key_suffix] = values[1] new_sd[key_pfx + 'v_proj' + key_suffix] = values[2] # position_idsの追加 new_sd['text_model.embeddings.position_ids'] = torch.Tensor( [list(range(max_length))] ).to(torch.int64) return new_sd # endregion # region Diffusers->StableDiffusion の変換コード # convert_diffusers_to_original_stable_diffusion をコピーして修正している(ASL 2.0) def conv_transformer_to_linear(checkpoint): keys = list(checkpoint.keys()) tf_keys = ['proj_in.weight', 'proj_out.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in tf_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] def convert_unet_state_dict_to_sd(v2, unet_state_dict): unet_conversion_map = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] unet_conversion_map_resnet = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] unet_conversion_map_layer = [] for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks hf_down_res_prefix = f'down_blocks.{i}.resnets.{j}.' sd_down_res_prefix = f'input_blocks.{3*i + j + 1}.0.' unet_conversion_map_layer.append( (sd_down_res_prefix, hf_down_res_prefix) ) if i < 3: # no attention layers in down_blocks.3 hf_down_atn_prefix = f'down_blocks.{i}.attentions.{j}.' sd_down_atn_prefix = f'input_blocks.{3*i + j + 1}.1.' unet_conversion_map_layer.append( (sd_down_atn_prefix, hf_down_atn_prefix) ) for j in range(3): # loop over resnets/attentions for upblocks hf_up_res_prefix = f'up_blocks.{i}.resnets.{j}.' sd_up_res_prefix = f'output_blocks.{3*i + j}.0.' unet_conversion_map_layer.append( (sd_up_res_prefix, hf_up_res_prefix) ) if i > 0: # no attention layers in up_blocks.0 hf_up_atn_prefix = f'up_blocks.{i}.attentions.{j}.' sd_up_atn_prefix = f'output_blocks.{3*i + j}.1.' unet_conversion_map_layer.append( (sd_up_atn_prefix, hf_up_atn_prefix) ) if i < 3: # no downsample in down_blocks.3 hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.conv.' sd_downsample_prefix = f'input_blocks.{3*(i+1)}.0.op.' unet_conversion_map_layer.append( (sd_downsample_prefix, hf_downsample_prefix) ) # no upsample in up_blocks.3 hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.' sd_upsample_prefix = ( f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.' 
) unet_conversion_map_layer.append( (sd_upsample_prefix, hf_upsample_prefix) ) hf_mid_atn_prefix = 'mid_block.attentions.0.' sd_mid_atn_prefix = 'middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): hf_mid_res_prefix = f'mid_block.resnets.{j}.' sd_mid_res_prefix = f'middle_block.{2*j}.' unet_conversion_map_layer.append( (sd_mid_res_prefix, hf_mid_res_prefix) ) # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. mapping = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: mapping[hf_name] = sd_name for k, v in mapping.items(): if 'resnets' in k: for sd_part, hf_part in unet_conversion_map_resnet: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} if v2: conv_transformer_to_linear(new_state_dict) return new_state_dict # ================# # VAE Conversion # # ================# def reshape_weight_for_sd(w): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape, 1, 1) def convert_vae_state_dict(vae_state_dict): vae_conversion_map = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): hf_down_prefix = f'encoder.down_blocks.{i}.resnets.{j}.' sd_down_prefix = f'encoder.down.{i}.block.{j}.' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.' sd_downsample_prefix = f'down.{i}.downsample.' vae_conversion_map.append( (sd_downsample_prefix, hf_downsample_prefix) ) hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.' sd_upsample_prefix = f'up.{3-i}.upsample.' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): hf_up_prefix = f'decoder.up_blocks.{i}.resnets.{j}.' sd_up_prefix = f'decoder.up.{3-i}.block.{j}.' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): hf_mid_res_prefix = f'mid_block.resnets.{i}.' sd_mid_res_prefix = f'mid.block_{i+1}.' 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) vae_conversion_map_attn = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] mapping = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): if 'attentions' in k: for sd_part, hf_part in vae_conversion_map_attn: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} weights_to_convert = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f'mid.attn_1.{weight_name}.weight' in k: # print(f"Reshaping {k} for SD format") new_state_dict[k] = reshape_weight_for_sd(v) return new_state_dict # endregion # region 自作のモデル読み書きなど def is_safetensors(path): return os.path.splitext(path)[1].lower() == '.safetensors' def load_checkpoint_with_text_encoder_conversion(ckpt_path): # text encoderの格納形式が違うモデルに対応する ('text_model'がない) TEXT_ENCODER_KEY_REPLACEMENTS = [ ( 'cond_stage_model.transformer.embeddings.', 'cond_stage_model.transformer.text_model.embeddings.', ), ( 'cond_stage_model.transformer.encoder.', 'cond_stage_model.transformer.text_model.encoder.', ), ( 'cond_stage_model.transformer.final_layer_norm.', 'cond_stage_model.transformer.text_model.final_layer_norm.', ), ] if is_safetensors(ckpt_path): checkpoint = None state_dict = load_file(ckpt_path, 'cpu') else: checkpoint = torch.load(ckpt_path, map_location='cpu') if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint checkpoint = None key_reps = [] for rep_from, rep_to in TEXT_ENCODER_KEY_REPLACEMENTS: for key in state_dict.keys(): if key.startswith(rep_from): new_key = rep_to + key[len(rep_from) :] key_reps.append((key, new_key)) for key, new_key in key_reps: state_dict[new_key] = state_dict[key] del state_dict[key] return checkpoint, state_dict # TODO dtype指定の動作が怪しいので確認する text_encoderを指定形式で作れるか未確認 def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, dtype=None): _, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path) if dtype is not None: for k, v in state_dict.items(): if type(v) is torch.Tensor: state_dict[k] = v.to(dtype) # Convert the UNet2DConditionModel model. unet_config = create_unet_diffusers_config(v2) converted_unet_checkpoint = convert_ldm_unet_checkpoint( v2, state_dict, unet_config ) unet = UNet2DConditionModel(**unet_config) info = unet.load_state_dict(converted_unet_checkpoint) print('loading u-net:', info) # Convert the VAE model. 
vae_config = create_vae_diffusers_config() converted_vae_checkpoint = convert_ldm_vae_checkpoint( state_dict, vae_config ) vae = AutoencoderKL(**vae_config) info = vae.load_state_dict(converted_vae_checkpoint) print('loadint vae:', info) # convert text_model if v2: converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2( state_dict, 77 ) cfg = CLIPTextConfig( vocab_size=49408, hidden_size=1024, intermediate_size=4096, num_hidden_layers=23, num_attention_heads=16, max_position_embeddings=77, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, model_type='clip_text_model', projection_dim=512, torch_dtype='float32', transformers_version='4.25.0.dev0', ) text_model = CLIPTextModel._from_config(cfg) info = text_model.load_state_dict(converted_text_encoder_checkpoint) else: converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1( state_dict ) text_model = CLIPTextModel.from_pretrained( 'openai/clip-vit-large-patch14' ) info = text_model.load_state_dict(converted_text_encoder_checkpoint) print('loading text encoder:', info) return text_model, vae, unet def convert_text_encoder_state_dict_to_sd_v2( checkpoint, make_dummy_weights=False ): def convert_key(key): # position_idsの除去 if '.position_ids' in key: return None # common key = key.replace('text_model.encoder.', 'transformer.') key = key.replace('text_model.', '') if 'layers' in key: # resblocks conversion key = key.replace('.layers.', '.resblocks.') if '.layer_norm' in key: key = key.replace('.layer_norm', '.ln_') elif '.mlp.' in key: key = key.replace('.fc1.', '.c_fc.') key = key.replace('.fc2.', '.c_proj.') elif '.self_attn.out_proj' in key: key = key.replace('.self_attn.out_proj.', '.attn.out_proj.') elif '.self_attn.' in key: key = None # 特殊なので後で処理する else: raise ValueError(f'unexpected key in DiffUsers model: {key}') elif '.position_embedding' in key: key = key.replace( 'embeddings.position_embedding.weight', 'positional_embedding' ) elif '.token_embedding' in key: key = key.replace( 'embeddings.token_embedding.weight', 'token_embedding.weight' ) elif 'final_layer_norm' in key: key = key.replace('final_layer_norm', 'ln_final') return key keys = list(checkpoint.keys()) new_sd = {} for key in keys: new_key = convert_key(key) if new_key is None: continue new_sd[new_key] = checkpoint[key] # attnの変換 for key in keys: if 'layers' in key and 'q_proj' in key: # 三つを結合 key_q = key key_k = key.replace('q_proj', 'k_proj') key_v = key.replace('q_proj', 'v_proj') value_q = checkpoint[key_q] value_k = checkpoint[key_k] value_v = checkpoint[key_v] value = torch.cat([value_q, value_k, value_v]) new_key = key.replace( 'text_model.encoder.layers.', 'transformer.resblocks.' ) new_key = new_key.replace('.self_attn.q_proj.', '.attn.in_proj_') new_sd[new_key] = value # 最後の層などを捏造するか if make_dummy_weights: print( 'make dummy weights for resblock.23, text_projection and logit scale.' 
) keys = list(new_sd.keys()) for key in keys: if key.startswith('transformer.resblocks.22.'): new_sd[key.replace('.22.', '.23.')] = new_sd[ key ].clone() # copyしないとsafetensorsの保存で落ちる # Diffusersに含まれない重みを作っておく new_sd['text_projection'] = torch.ones( (1024, 1024), dtype=new_sd[keys[0]].dtype, device=new_sd[keys[0]].device, ) new_sd['logit_scale'] = torch.tensor(1) return new_sd def save_stable_diffusion_checkpoint( v2, output_file, text_encoder, unet, ckpt_path, epochs, steps, save_dtype=None, vae=None, ): if ckpt_path is not None: # epoch/stepを参照する。またVAEがメモリ上にないときなど、もう一度VAEを含めて読み込む checkpoint, state_dict = load_checkpoint_with_text_encoder_conversion( ckpt_path ) if checkpoint is None: # safetensors または state_dictのckpt checkpoint = {} strict = False else: strict = True if 'state_dict' in state_dict: del state_dict['state_dict'] else: # 新しく作る assert ( vae is not None ), 'VAE is required to save a checkpoint without a given checkpoint' checkpoint = {} state_dict = {} strict = False def update_sd(prefix, sd): for k, v in sd.items(): key = prefix + k assert ( not strict or key in state_dict ), f'Illegal key in save SD: {key}' if save_dtype is not None: v = v.detach().clone().to('cpu').to(save_dtype) state_dict[key] = v # Convert the UNet model unet_state_dict = convert_unet_state_dict_to_sd(v2, unet.state_dict()) update_sd('model.diffusion_model.', unet_state_dict) # Convert the text encoder model if v2: make_dummy = ( ckpt_path is None ) # 参照元のcheckpointがない場合は最後の層を前の層から複製して作るなどダミーの重みを入れる text_enc_dict = convert_text_encoder_state_dict_to_sd_v2( text_encoder.state_dict(), make_dummy ) update_sd('cond_stage_model.model.', text_enc_dict) else: text_enc_dict = text_encoder.state_dict() update_sd('cond_stage_model.transformer.', text_enc_dict) # Convert the VAE if vae is not None: vae_dict = convert_vae_state_dict(vae.state_dict()) update_sd('first_stage_model.', vae_dict) # Put together new checkpoint key_count = len(state_dict.keys()) new_ckpt = {'state_dict': state_dict} if 'epoch' in checkpoint: epochs += checkpoint['epoch'] if 'global_step' in checkpoint: steps += checkpoint['global_step'] new_ckpt['epoch'] = epochs new_ckpt['global_step'] = steps if is_safetensors(output_file): # TODO Tensor以外のdictの値を削除したほうがいいか save_file(state_dict, output_file) else: torch.save(new_ckpt, output_file) return key_count def save_diffusers_checkpoint( v2, output_dir, text_encoder, unet, pretrained_model_name_or_path, vae=None, use_safetensors=False, ): if pretrained_model_name_or_path is None: # load default settings for v1/v2 if v2: pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V2 else: pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V1 scheduler = DDIMScheduler.from_pretrained( pretrained_model_name_or_path, subfolder='scheduler' ) tokenizer = CLIPTokenizer.from_pretrained( pretrained_model_name_or_path, subfolder='tokenizer' ) if vae is None: vae = AutoencoderKL.from_pretrained( pretrained_model_name_or_path, subfolder='vae' ) pipeline = StableDiffusionPipeline( unet=unet, text_encoder=text_encoder, vae=vae, scheduler=scheduler, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, requires_safety_checker=None, ) pipeline.save_pretrained(output_dir, safe_serialization=use_safetensors) VAE_PREFIX = 'first_stage_model.' 
def load_vae(vae_id, dtype): print(f'load VAE: {vae_id}') if os.path.isdir(vae_id) or not os.path.isfile(vae_id): # Diffusers local/remote try: vae = AutoencoderKL.from_pretrained( vae_id, subfolder=None, torch_dtype=dtype ) except EnvironmentError as e: print(f'exception occurs in loading vae: {e}') print("retry with subfolder='vae'") vae = AutoencoderKL.from_pretrained( vae_id, subfolder='vae', torch_dtype=dtype ) return vae # local vae_config = create_vae_diffusers_config() if vae_id.endswith('.bin'): # SD 1.5 VAE on Huggingface vae_sd = torch.load(vae_id, map_location='cpu') converted_vae_checkpoint = vae_sd else: # StableDiffusion vae_model = torch.load(vae_id, map_location='cpu') vae_sd = vae_model['state_dict'] # vae only or full model full_model = False for vae_key in vae_sd: if vae_key.startswith(VAE_PREFIX): full_model = True break if not full_model: sd = {} for key, value in vae_sd.items(): sd[VAE_PREFIX + key] = value vae_sd = sd del sd # Convert the VAE model. converted_vae_checkpoint = convert_ldm_vae_checkpoint( vae_sd, vae_config ) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) return vae def get_epoch_ckpt_name(use_safetensors, epoch): return f'epoch-{epoch:06d}' + ( '.safetensors' if use_safetensors else '.ckpt' ) def get_last_ckpt_name(use_safetensors): return f'last' + ('.safetensors' if use_safetensors else '.ckpt') # endregion def make_bucket_resolutions( max_reso, min_size=256, max_size=1024, divisible=64 ): max_width, max_height = max_reso max_area = (max_width // divisible) * (max_height // divisible) resos = set() size = int(math.sqrt(max_area)) * divisible resos.add((size, size)) size = min_size while size <= max_size: width = size height = min(max_size, (max_area // (width // divisible)) * divisible) resos.add((width, height)) resos.add((height, width)) # # make additional resos # if width >= height and width - divisible >= min_size: # resos.add((width - divisible, height)) # resos.add((height, width - divisible)) # if height >= width and height - divisible >= min_size: # resos.add((width, height - divisible)) # resos.add((height - divisible, width)) size += divisible resos = list(resos) resos.sort() aspect_ratios = [w / h for w, h in resos] return resos, aspect_ratios if __name__ == '__main__': resos, aspect_ratios = make_bucket_resolutions((512, 768)) print(len(resos)) print(resos) print(aspect_ratios) ars = set() for ar in aspect_ratios: if ar in ars: print('error! duplicate ar:', ar) ars.add(ar) library/dreambooth_folder_creation_gui.py METASEP import gradio as gr from easygui import diropenbox, msgbox from .common_gui import get_folder_path import shutil import os def copy_info_to_Directories_tab(training_folder): img_folder = os.path.join(training_folder, 'img') if os.path.exists(os.path.join(training_folder, 'reg')): reg_folder = os.path.join(training_folder, 'reg') else: reg_folder = '' model_folder = os.path.join(training_folder, 'model') log_folder = os.path.join(training_folder, 'log') return img_folder, reg_folder, model_folder, log_folder def dreambooth_folder_preparation( util_training_images_dir_input, util_training_images_repeat_input, util_instance_prompt_input, util_regularization_images_dir_input, util_regularization_images_repeat_input, util_class_prompt_input, util_training_dir_output, ): # Check if the input variables are empty if not len(util_training_dir_output): print( "Destination training directory is missing... can't perform the required task..." 
) return else: # Create the util_training_dir_output directory if it doesn't exist os.makedirs(util_training_dir_output, exist_ok=True) # Check for instance prompt if util_instance_prompt_input == '': msgbox('Instance prompt missing...') return # Check for class prompt if util_class_prompt_input == '': msgbox('Class prompt missing...') return # Create the training_dir path if util_training_images_dir_input == '': print( "Training images directory is missing... can't perform the required task..." ) return else: training_dir = os.path.join( util_training_dir_output, f'img/{int(util_training_images_repeat_input)}_{util_instance_prompt_input} {util_class_prompt_input}', ) # Remove folders if they exist if os.path.exists(training_dir): print(f'Removing existing directory {training_dir}...') shutil.rmtree(training_dir) # Copy the training images to their respective directories print(f'Copy {util_training_images_dir_input} to {training_dir}...') shutil.copytree(util_training_images_dir_input, training_dir) # Create the regularization_dir path if ( util_class_prompt_input == '' or not util_regularization_images_repeat_input > 0 ): print( 'Regularization images directory or repeats is missing... not copying regularisation images...' ) else: regularization_dir = os.path.join( util_training_dir_output, f'reg/{int(util_regularization_images_repeat_input)}_{util_class_prompt_input}', ) # Remove folders if they exist if os.path.exists(regularization_dir): print(f'Removing existing directory {regularization_dir}...') shutil.rmtree(regularization_dir) # Copy the regularisation images to their respective directories print( f'Copy {util_regularization_images_dir_input} to {regularization_dir}...' ) shutil.copytree( util_regularization_images_dir_input, regularization_dir ) print( f'Done creating kohya_ss training folder structure at {util_training_dir_output}...' ) def gradio_dreambooth_folder_creation_tab( train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ): with gr.Tab('Dreambooth folder preparation'): gr.Markdown( 'This utility will create the necessary folder structure for the training images and optional regularization images needed for the kohys_ss Dreambooth method to function correctly.' 
) with gr.Row(): util_instance_prompt_input = gr.Textbox( label='Instance prompt', placeholder='Eg: asd', interactive=True, ) util_class_prompt_input = gr.Textbox( label='Class prompt', placeholder='Eg: person', interactive=True, ) with gr.Row(): util_training_images_dir_input = gr.Textbox( label='Training images', placeholder='Directory containing the training images', interactive=True, ) button_util_training_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_util_training_images_dir_input.click( get_folder_path, outputs=util_training_images_dir_input ) util_training_images_repeat_input = gr.Number( label='Repeats', value=40, interactive=True, elem_id='number_input', ) with gr.Row(): util_regularization_images_dir_input = gr.Textbox( label='Regularisation images', placeholder='(Optional) Directory containing the regularisation images', interactive=True, ) button_util_regularization_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_util_regularization_images_dir_input.click( get_folder_path, outputs=util_regularization_images_dir_input ) util_regularization_images_repeat_input = gr.Number( label='Repeats', value=1, interactive=True, elem_id='number_input', ) with gr.Row(): util_training_dir_output = gr.Textbox( label='Destination training directory', placeholder='Directory where formatted training and regularisation folders will be placed', interactive=True, ) button_util_training_dir_output = gr.Button( '📂', elem_id='open_folder_small' ) button_util_training_dir_output.click( get_folder_path, outputs=util_training_dir_output ) button_prepare_training_data = gr.Button('Prepare training data') button_prepare_training_data.click( dreambooth_folder_preparation, inputs=[ util_training_images_dir_input, util_training_images_repeat_input, util_instance_prompt_input, util_regularization_images_dir_input, util_regularization_images_repeat_input, util_class_prompt_input, util_training_dir_output, ], ) button_copy_info_to_Directories_tab = gr.Button( 'Copy info to Directories Tab' ) button_copy_info_to_Directories_tab.click( copy_info_to_Directories_tab, inputs=[util_training_dir_output], outputs=[ train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ], ) library/dataset_balancing_gui.py METASEP import os import re import gradio as gr from easygui import msgbox, boolbox from .common_gui import get_folder_path # def select_folder(): # # Open a file dialog to select a directory # folder = filedialog.askdirectory() # # Update the GUI to display the selected folder # selected_folder_label.config(text=folder) def dataset_balancing(concept_repeats, folder, insecure): if not concept_repeats > 0: # Display an error message if the total number of repeats is not a valid integer msgbox('Please enter a valid integer for the total number of repeats.') return concept_repeats = int(concept_repeats) # Check if folder exist if folder == '' or not os.path.isdir(folder): msgbox('Please enter a valid folder for balancing.') return pattern = re.compile(r'^\d+_.+$') # Iterate over the subdirectories in the selected folder for subdir in os.listdir(folder): if pattern.match(subdir) or insecure: # Calculate the number of repeats for the current subdirectory # Get a list of all the files in the folder files = os.listdir(os.path.join(folder, subdir)) # Filter the list to include only image files image_files = [ f for f in files if f.endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')) ] # Count the number of image files images = len(image_files) # Check if 
the subdirectory name starts with a number inside braces, # indicating that the repeats value should be multiplied match = re.match(r'^\{(\d+\.?\d*)\}', subdir) if match: # Multiply the repeats value by the number inside the braces repeats = max( 1, round(concept_repeats / images * float(match.group(1))) ) subdir = subdir[match.end() :] else: repeats = max(1, round(concept_repeats / images)) # Check if the subdirectory name already has a number at the beginning match = re.match(r'^\d+_', subdir) if match: # Replace the existing number with the new number old_name = os.path.join(folder, subdir) new_name = os.path.join( folder, f'{repeats}_{subdir[match.end():]}' ) else: # Add the new number at the beginning of the name old_name = os.path.join(folder, subdir) new_name = os.path.join(folder, f'{repeats}_{subdir}') os.rename(old_name, new_name) else: print( f'Skipping folder {subdir} because it does not match kohya_ss expected syntax...' ) msgbox('Dataset balancing completed...') def warning(insecure): if insecure: if boolbox( f'WARNING!!! You have asked to rename non kohya_ss <num>_<text> folders...\n\nAre you sure you want to do that?', choices=('Yes, I like danger', 'No, get me out of here'), ): return True else: return False def gradio_dataset_balancing_tab(): with gr.Tab('Dataset balancing'): gr.Markdown( 'This utility will ensure that each concept folder in the dataset folder is used equally during the training process of the dreambooth machine learning model, regardless of the number of images in each folder. It will do this by renaming the concept folders to indicate the number of times they should be repeated during training.' ) gr.Markdown( 'WARNING! The use of this utility on the wrong folder can lead to unexpected folder renaming!!!' ) with gr.Row(): select_dataset_folder_input = gr.Textbox( label='Dataset folder', placeholder='Folder containing the concepts folders to balance...', interactive=True, ) select_dataset_folder_button = gr.Button( '📂', elem_id='open_folder_small' ) select_dataset_folder_button.click( get_folder_path, outputs=select_dataset_folder_input ) total_repeats_number = gr.Number( value=1000, interactive=True, label='Training steps per concept per epoch', ) with gr.Accordion('Advanced options', open=False): insecure = gr.Checkbox( value=False, label='DANGER!!! 
-- Insecure folder renaming -- DANGER!!!', ) insecure.change(warning, inputs=insecure, outputs=insecure) balance_button = gr.Button('Balance dataset') balance_button.click( dataset_balancing, inputs=[ total_repeats_number, select_dataset_folder_input, insecure, ], ) library/convert_model_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess import os import shutil from .common_gui import get_folder_path, get_file_path folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 save_style_symbol = '\U0001f4be' # 💾 document_symbol = '\U0001F4C4' # 📄 def convert_model( source_model_input, source_model_type, target_model_folder_input, target_model_name_input, target_model_type, target_save_precision_type, ): # Check for caption_text_input if source_model_type == '': msgbox('Invalid source model type') return # Check if source model exist if os.path.isfile(source_model_input): print('The provided source model is a file') elif os.path.isdir(source_model_input): print('The provided model is a folder') else: msgbox('The provided source model is neither a file nor a folder') return # Check if source model exist if os.path.isdir(target_model_folder_input): print('The provided model folder exist') else: msgbox('The provided target folder does not exist') return run_cmd = f'.\\venv\Scripts\python.exe "tools/convert_diffusers20_original_sd.py"' v1_models = [ 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ] # check if v1 models if str(source_model_type) in v1_models: print('SD v1 model specified. Setting --v1 parameter') run_cmd += ' --v1' else: print('SD v2 model specified. Setting --v2 parameter') run_cmd += ' --v2' if not target_save_precision_type == 'unspecified': run_cmd += f' --{target_save_precision_type}' if ( target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): run_cmd += f' --reference_model="{source_model_type}"' if target_model_type == 'diffuser_safetensors': run_cmd += ' --use_safetensors' run_cmd += f' "{source_model_input}"' if ( target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): target_model_path = os.path.join( target_model_folder_input, target_model_name_input ) run_cmd += f' "{target_model_path}"' else: target_model_path = os.path.join( target_model_folder_input, f'{target_model_name_input}.{target_model_type}', ) run_cmd += f' "{target_model_path}"' print(run_cmd) # Run the command subprocess.run(run_cmd) if ( not target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): v2_models = [ 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', ] v_parameterization = [ 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', ] if str(source_model_type) in v2_models: inference_file = os.path.join( target_model_folder_input, f'{target_model_name_input}.yaml' ) print(f'Saving v2-inference.yaml as {inference_file}') shutil.copy( f'./v2_inference/v2-inference.yaml', f'{inference_file}', ) if str(source_model_type) in v_parameterization: inference_file = os.path.join( target_model_folder_input, f'{target_model_name_input}.yaml' ) print(f'Saving v2-inference-v.yaml as {inference_file}') shutil.copy( f'./v2_inference/v2-inference-v.yaml', f'{inference_file}', ) # parser = argparse.ArgumentParser() # parser.add_argument("--v1", action='store_true', # help='load v1.x model (v1 or v2 is required to load checkpoint) / 1.xのモデルを読み込む') # parser.add_argument("--v2", action='store_true', # help='load v2.0 model (v1 or v2 is required 
to load checkpoint) / 2.0のモデルを読み込む') # parser.add_argument("--fp16", action='store_true', # help='load as fp16 (Diffusers only) and save as fp16 (checkpoint only) / fp16形式で読み込み(Diffusers形式のみ対応)、保存する(checkpointのみ対応)') # parser.add_argument("--bf16", action='store_true', help='save as bf16 (checkpoint only) / bf16形式で保存する(checkpointのみ対応)') # parser.add_argument("--float", action='store_true', # help='save as float (checkpoint only) / float(float32)形式で保存する(checkpointのみ対応)') # parser.add_argument("--epoch", type=int, default=0, help='epoch to write to checkpoint / checkpointに記録するepoch数の値') # parser.add_argument("--global_step", type=int, default=0, # help='global_step to write to checkpoint / checkpointに記録するglobal_stepの値') # parser.add_argument("--reference_model", type=str, default=None, # help="reference model for schduler/tokenizer, required in saving Diffusers, copy schduler/tokenizer from this / scheduler/tokenizerのコピー元のDiffusersモデル、Diffusers形式で保存するときに必要") # parser.add_argument("model_to_load", type=str, default=None, # help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ") # parser.add_argument("model_to_save", type=str, default=None, # help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存") ### # Gradio UI ### def gradio_convert_model_tab(): with gr.Tab('Convert model'): gr.Markdown( 'This utility can be used to convert from one stable diffusion model format to another.' ) with gr.Row(): source_model_input = gr.Textbox( label='Source model', placeholder='path to source model folder of file to convert...', interactive=True, ) button_source_model_dir = gr.Button( folder_symbol, elem_id='open_folder_small' ) button_source_model_dir.click( get_folder_path, outputs=source_model_input ) button_source_model_file = gr.Button( document_symbol, elem_id='open_folder_small' ) button_source_model_file.click( get_file_path, inputs=[source_model_input], outputs=source_model_input, ) source_model_type = gr.Dropdown( label='Source model type', choices=[ 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ], ) with gr.Row(): target_model_folder_input = gr.Textbox( label='Target model folder', placeholder='path to target model folder of file name to create...', interactive=True, ) button_target_model_folder = gr.Button( folder_symbol, elem_id='open_folder_small' ) button_target_model_folder.click( get_folder_path, outputs=target_model_folder_input ) target_model_name_input = gr.Textbox( label='Target model name', placeholder='target model name...', interactive=True, ) target_model_type = gr.Dropdown( label='Target model type', choices=[ 'diffuser', 'diffuser_safetensors', 'ckpt', 'safetensors', ], ) target_save_precision_type = gr.Dropdown( label='Target model precison', choices=['unspecified', 'fp16', 'bf16', 'float'], value='unspecified', ) convert_button = gr.Button('Convert model') convert_button.click( convert_model, inputs=[ source_model_input, source_model_type, target_model_folder_input, target_model_name_input, target_model_type, target_save_precision_type, ], ) library/common_gui.py METASEP from tkinter import filedialog, Tk import os def get_file_path(file_path='', defaultextension='.json'): current_file_path = file_path # print(f'current file path: {current_file_path}') root = 
Tk() root.wm_attributes('-topmost', 1) root.withdraw() file_path = filedialog.askopenfilename( filetypes=(('Config files', '*.json'), ('All files', '*')), defaultextension=defaultextension, ) root.destroy() if file_path == '': file_path = current_file_path return file_path def remove_doublequote(file_path): if file_path != None: file_path = file_path.replace('"', '') return file_path def get_folder_path(folder_path=''): current_folder_path = folder_path root = Tk() root.wm_attributes('-topmost', 1) root.withdraw() folder_path = filedialog.askdirectory() root.destroy() if folder_path == '': folder_path = current_folder_path return folder_path def get_saveasfile_path(file_path='', defaultextension='.json'): current_file_path = file_path # print(f'current file path: {current_file_path}') root = Tk() root.wm_attributes('-topmost', 1) root.withdraw() save_file_path = filedialog.asksaveasfile( filetypes=(('Config files', '*.json'), ('All files', '*')), defaultextension=defaultextension, ) root.destroy() # print(save_file_path) if save_file_path == None: file_path = current_file_path else: print(save_file_path.name) file_path = save_file_path.name # print(file_path) return file_path def add_pre_postfix( folder='', prefix='', postfix='', caption_file_ext='.caption' ): files = [f for f in os.listdir(folder) if f.endswith(caption_file_ext)] if not prefix == '': prefix = f'{prefix} ' if not postfix == '': postfix = f' {postfix}' for file in files: with open(os.path.join(folder, file), 'r+') as f: content = f.read() content = content.rstrip() f.seek(0, 0) f.write(f'{prefix}{content}{postfix}') f.close() library/blip_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess import os from .common_gui import get_folder_path, add_pre_postfix def caption_images( train_data_dir, caption_file_ext, batch_size, num_beams, top_p, max_length, min_length, beam_search, prefix, postfix, ): # Check for caption_text_input # if caption_text_input == "": # msgbox("Caption text is missing...") # return # Check for images_dir_input if train_data_dir == '': msgbox('Image folder is missing...') return print(f'Captioning files in {train_data_dir}...') run_cmd = f'.\\venv\\Scripts\\python.exe "./BLIP_caption/make_captions.py"' run_cmd += f' --batch_size="{int(batch_size)}"' run_cmd += f' --num_beams="{int(num_beams)}"' run_cmd += f' --top_p="{top_p}"' run_cmd += f' --max_length="{int(max_length)}"' run_cmd += f' --min_length="{int(min_length)}"' if beam_search: run_cmd += f' --beam_search' if caption_file_ext != '': run_cmd += f' --caption_extension="{caption_file_ext}"' run_cmd += f' "{train_data_dir}"' run_cmd += f' "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth"' print(run_cmd) # Run the command subprocess.run(run_cmd) # Add prefix and postfix add_pre_postfix( folder=train_data_dir, caption_file_ext=caption_file_ext, prefix=prefix, postfix=postfix, ) print('...captioning done') ### # Gradio UI ### def gradio_blip_caption_gui_tab(): with gr.Tab('BLIP Captioning'): gr.Markdown( 'This utility will use BLIP to caption files for each images in a folder.' 
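            # Illustrative only: assuming an image folder such as "D:/dataset/img"
            # (hypothetical path) and the default widget values below,
            # caption_images() assembles roughly this command before handing it
            # to subprocess.run():
            #
            #   .\venv\Scripts\python.exe "./BLIP_caption/make_captions.py"
            #       --batch_size="1" --num_beams="1" --top_p="0.9"
            #       --max_length="75" --min_length="5" --beam_search
            #       "D:/dataset/img"
            #       "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth"
            #
            # --caption_extension is only appended when the extension textbox is
            # non-empty; prefix/postfix are applied afterwards by add_pre_postfix().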
) with gr.Row(): train_data_dir = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_train_data_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_train_data_dir_input.click( get_folder_path, outputs=train_data_dir ) with gr.Row(): caption_file_ext = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) prefix = gr.Textbox( label='Prefix to add to BLIP caption', placeholder='(Optional)', interactive=True, ) postfix = gr.Textbox( label='Postfix to add to BLIP caption', placeholder='(Optional)', interactive=True, ) batch_size = gr.Number( value=1, label='Batch size', interactive=True ) with gr.Row(): beam_search = gr.Checkbox( label='Use beam search', interactive=True, value=True ) num_beams = gr.Number( value=1, label='Number of beams', interactive=True ) top_p = gr.Number(value=0.9, label='Top p', interactive=True) max_length = gr.Number( value=75, label='Max length', interactive=True ) min_length = gr.Number( value=5, label='Min length', interactive=True ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[ train_data_dir, caption_file_ext, batch_size, num_beams, top_p, max_length, min_length, beam_search, prefix, postfix, ], ) library/basic_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess from .common_gui import get_folder_path, add_pre_postfix def caption_images( caption_text_input, images_dir_input, overwrite_input, caption_file_ext, prefix, postfix, ): # Check for images_dir_input if images_dir_input == '': msgbox('Image folder is missing...') return if not caption_text_input == '': print( f'Captioning files in {images_dir_input} with {caption_text_input}...' ) run_cmd = f'python "tools/caption.py"' run_cmd += f' --caption_text="{caption_text_input}"' if overwrite_input: run_cmd += f' --overwrite' if caption_file_ext != '': run_cmd += f' --caption_file_ext="{caption_file_ext}"' run_cmd += f' "{images_dir_input}"' print(run_cmd) # Run the command subprocess.run(run_cmd) if overwrite_input: # Add prefix and postfix add_pre_postfix( folder=images_dir_input, caption_file_ext=caption_file_ext, prefix=prefix, postfix=postfix, ) else: if not prefix == '' or not postfix == '': msgbox( 'Could not modify caption files with requested change because the "Overwrite existing captions in folder" option is not selected...' ) print('...captioning done') ### # Gradio UI ### def gradio_basic_caption_gui_tab(): with gr.Tab('Basic Captioning'): gr.Markdown( 'This utility will allow the creation of simple caption files for each images in a folder.' ) with gr.Row(): images_dir_input = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_images_dir_input.click( get_folder_path, outputs=images_dir_input ) with gr.Row(): prefix = gr.Textbox( label='Prefix to add to txt caption', placeholder='(Optional)', interactive=True, ) caption_text_input = gr.Textbox( label='Caption text', placeholder='Eg: , by some artist. 
Leave empti if you just want to add pre or postfix', interactive=True, ) postfix = gr.Textbox( label='Postfix to add to txt caption', placeholder='(Optional)', interactive=True, ) with gr.Row(): overwrite_input = gr.Checkbox( label='Overwrite existing captions in folder', interactive=True, value=False, ) caption_file_ext = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[ caption_text_input, images_dir_input, overwrite_input, caption_file_ext, prefix, postfix, ], ) library/__init__.py METASEP bitsandbytes_windows/main.py METASEP """ extract factors the build is dependent on: [X] compute capability [ ] TODO: Q - What if we have multiple GPUs of different makes? - CUDA version - Software: - CPU-only: only CPU quantization functions (no optimizer, no matrix multipl) - CuBLAS-LT: full-build 8-bit optimizer - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`) evaluation: - if paths faulty, return meaningful error - else: - determine CUDA version - determine capabilities - based on that set the default path """ import ctypes from .paths import determine_cuda_runtime_lib_path def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: error_str = ctypes.c_char_p() cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) print(f"CUDA exception! Error code: {error_str.value.decode()}") def get_cuda_version(cuda, cudart_path): # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION try: cudart = ctypes.CDLL(cudart_path) except OSError: # TODO: shouldn't we error or at least warn here? print(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None version = ctypes.c_int() check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: print('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: cuda = ctypes.CDLL("libcuda.so") except OSError: # TODO: shouldn't we error or at least warn here? print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda def get_compute_capabilities(cuda): """ 1. find libcuda.so library (GPU driver) (/usr/lib) init_device -> init variables -> call function by reference 2. call extern C function to determine CC (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html) 3. Check for CUDA errors https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549 """ nGpus = ctypes.c_int() cc_major = ctypes.c_int() cc_minor = ctypes.c_int() device = ctypes.c_int() check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus))) ccs = [] for i in range(nGpus.value): check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i)) ref_major = ctypes.byref(cc_major) ref_minor = ctypes.byref(cc_minor) # 2. 
call extern C function to determine CC check_cuda_result( cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device) ) ccs.append(f"{cc_major.value}.{cc_minor.value}") return ccs # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error def get_compute_capability(cuda): """ Extracts the highest compute capbility from all available GPUs, as compute capabilities are downwards compatible. If no GPUs are detected, it returns None. """ ccs = get_compute_capabilities(cuda) if ccs is not None: # TODO: handle different compute capabilities; for now, take the max return ccs[-1] return None def evaluate_cuda_setup(): print('') print('='*35 + 'BUG REPORT' + '='*35) print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') print('='*80) return "libbitsandbytes_cuda116.dll" # $$$ binary_name = "libbitsandbytes_cpu.so" #if not torch.cuda.is_available(): #print('No GPU detected. Loading CPU library...') #return binary_name cudart_path = determine_cuda_runtime_lib_path() if cudart_path is None: print( "WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!" ) return binary_name print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}") cuda = get_cuda_lib_handle() cc = get_compute_capability(cuda) print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") cuda_version_string = get_cuda_version(cuda, cudart_path) if cc == '': print( "WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library..." ) return binary_name # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() return binary_name bitsandbytes_windows/cextension.py METASEP import ctypes as ct from pathlib import Path from warnings import warn from .cuda_setup.main import evaluate_cuda_setup class CUDALibrary_Singleton(object): _instance = None def __init__(self): raise RuntimeError("Call get_instance() instead") def initialize(self): binary_name = evaluate_cuda_setup() package_dir = Path(__file__).parent binary_path = package_dir / binary_name if not binary_path.exists(): print(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") legacy_binary_name = "libbitsandbytes.so" print(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name if not binary_path.exists(): print('CUDA SETUP: CUDA detection failed. 
Either CUDA driver not installed, CUDA not installed, or you have multiple conflicting CUDA libraries!') print('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') raise Exception('CUDA SETUP: Setup Failed!') # self.lib = ct.cdll.LoadLibrary(binary_path) self.lib = ct.cdll.LoadLibrary(str(binary_path)) # $$$ else: print(f"CUDA SETUP: Loading binary {binary_path}...") # self.lib = ct.cdll.LoadLibrary(binary_path) self.lib = ct.cdll.LoadLibrary(str(binary_path)) # $$$ @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls.__new__(cls) cls._instance.initialize() return cls._instance lib = CUDALibrary_Singleton.get_instance().lib try: lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: warn( "The installed version of bitsandbytes was compiled without GPU support. " "8-bit optimizers and GPU quantization are unavailable." ) COMPILED_WITH_CUDA = False setup.py METASEP from setuptools import setup, find_packages setup(name = "library", packages = find_packages()) dreambooth_gui.py METASEP # v1: initial release # v2: add open and save folder icons # v3: Add new Utilities tab for Dreambooth folder preparation # v3.1: Adding captionning of images to utilities import gradio as gr import json import math import os import subprocess import pathlib import shutil from library.dreambooth_folder_creation_gui import ( gradio_dreambooth_folder_creation_tab, ) from library.basic_caption_gui import gradio_basic_caption_gui_tab from library.convert_model_gui import gradio_convert_model_tab from library.blip_caption_gui import gradio_blip_caption_gui_tab from library.wd14_caption_gui import gradio_wd14_caption_gui_tab from library.dataset_balancing_gui import gradio_dataset_balancing_tab from library.common_gui import ( get_folder_path, remove_doublequote, get_file_path, get_saveasfile_path, ) from easygui import msgbox folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 save_style_symbol = '\U0001f4be' # 💾 document_symbol = '\U0001F4C4' # 📄 def save_configuration( save_as, file_path, pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): original_file_path = file_path save_as_bool = True if save_as.get('label') == 'True' else False if save_as_bool: print('Save as...') file_path = get_saveasfile_path(file_path) else: print('Save...') if file_path == None or file_path == '': file_path = get_saveasfile_path(file_path) # print(file_path) if file_path == None or file_path == '': return original_file_path # In case a file_path was provided and the user decide to cancel the open action # Return the values of the variables as a dictionary variables = { 'pretrained_model_name_or_path': pretrained_model_name_or_path, 'v2': v2, 'v_parameterization': v_parameterization, 'logging_dir': logging_dir, 'train_data_dir': train_data_dir, 'reg_data_dir': reg_data_dir, 'output_dir': output_dir, 'max_resolution': max_resolution, 'learning_rate': learning_rate, 
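        # For reference: json.dump() below writes these keys as a flat mapping.
        # A hypothetical fragment of a saved config could look like
        #   {"pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
        #    "learning_rate": "1e-6", "train_batch_size": 1, ...}
        # (example values only). open_configuration() reads the same keys back
        # with my_data.get(), falling back to the current UI values when a key
        # is absent.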
'lr_scheduler': lr_scheduler, 'lr_warmup': lr_warmup, 'train_batch_size': train_batch_size, 'epoch': epoch, 'save_every_n_epochs': save_every_n_epochs, 'mixed_precision': mixed_precision, 'save_precision': save_precision, 'seed': seed, 'num_cpu_threads_per_process': num_cpu_threads_per_process, 'cache_latent': cache_latent, 'caption_extention': caption_extention, 'enable_bucket': enable_bucket, 'gradient_checkpointing': gradient_checkpointing, 'full_fp16': full_fp16, 'no_token_padding': no_token_padding, 'stop_text_encoder_training': stop_text_encoder_training, 'use_8bit_adam': use_8bit_adam, 'xformers': xformers, 'save_model_as': save_model_as, 'shuffle_caption': shuffle_caption, 'save_state': save_state, 'resume': resume, 'prior_loss_weight': prior_loss_weight, } # Save the data to the selected file with open(file_path, 'w') as file: json.dump(variables, file) return file_path def open_configuration( file_path, pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): original_file_path = file_path file_path = get_file_path(file_path) # print(file_path) if not file_path == '' and not file_path == None: # load variables from JSON file with open(file_path, 'r') as f: my_data = json.load(f) else: file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action my_data = {} # Return the values of the variables as a dictionary return ( file_path, my_data.get( 'pretrained_model_name_or_path', pretrained_model_name_or_path ), my_data.get('v2', v2), my_data.get('v_parameterization', v_parameterization), my_data.get('logging_dir', logging_dir), my_data.get('train_data_dir', train_data_dir), my_data.get('reg_data_dir', reg_data_dir), my_data.get('output_dir', output_dir), my_data.get('max_resolution', max_resolution), my_data.get('learning_rate', learning_rate), my_data.get('lr_scheduler', lr_scheduler), my_data.get('lr_warmup', lr_warmup), my_data.get('train_batch_size', train_batch_size), my_data.get('epoch', epoch), my_data.get('save_every_n_epochs', save_every_n_epochs), my_data.get('mixed_precision', mixed_precision), my_data.get('save_precision', save_precision), my_data.get('seed', seed), my_data.get( 'num_cpu_threads_per_process', num_cpu_threads_per_process ), my_data.get('cache_latent', cache_latent), my_data.get('caption_extention', caption_extention), my_data.get('enable_bucket', enable_bucket), my_data.get('gradient_checkpointing', gradient_checkpointing), my_data.get('full_fp16', full_fp16), my_data.get('no_token_padding', no_token_padding), my_data.get('stop_text_encoder_training', stop_text_encoder_training), my_data.get('use_8bit_adam', use_8bit_adam), my_data.get('xformers', xformers), my_data.get('save_model_as', save_model_as), my_data.get('shuffle_caption', shuffle_caption), my_data.get('save_state', save_state), my_data.get('resume', resume), my_data.get('prior_loss_weight', prior_loss_weight), ) def train_model( pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, 
train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training_pct, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): def save_inference_file(output_dir, v2, v_parameterization): # Copy inference model for v2 if required if v2 and v_parameterization: print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml') shutil.copy( f'./v2_inference/v2-inference-v.yaml', f'{output_dir}/last.yaml', ) elif v2: print(f'Saving v2-inference.yaml as {output_dir}/last.yaml') shutil.copy( f'./v2_inference/v2-inference.yaml', f'{output_dir}/last.yaml', ) if pretrained_model_name_or_path == '': msgbox('Source model information is missing') return if train_data_dir == '': msgbox('Image folder path is missing') return if not os.path.exists(train_data_dir): msgbox('Image folder does not exist') return if reg_data_dir != '': if not os.path.exists(reg_data_dir): msgbox('Regularisation folder does not exist') return if output_dir == '': msgbox('Output folder path is missing') return # Get a list of all subfolders in train_data_dir subfolders = [ f for f in os.listdir(train_data_dir) if os.path.isdir(os.path.join(train_data_dir, f)) ] total_steps = 0 # Loop through each subfolder and extract the number of repeats for folder in subfolders: # Extract the number of repeats from the folder name repeats = int(folder.split('_')[0]) # Count the number of images in the folder num_images = len( [ f for f in os.listdir(os.path.join(train_data_dir, folder)) if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png') or f.endswith('.webp') ] ) # Calculate the total number of steps for this folder steps = repeats * num_images total_steps += steps # Print the result print(f'Folder {folder}: {steps} steps') # Print the result # print(f"{total_steps} total steps") if reg_data_dir == '': reg_factor = 1 else: print( 'Regularisation images are used... Will double the number of steps required...' 
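            # Worked example (illustrative numbers only): a folder named
            # "40_subject class" holding 10 images contributes 40 * 10 = 400
            # steps. With train_batch_size=1, epoch=1 and regularisation images
            # present (reg_factor=2), the formula below gives
            # max_train_steps = ceil(400 / 1 * 1 * 2) = 800, and lr_warmup=10
            # (percent) would yield lr_warmup_steps = round(10 * 800 / 100) = 80.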
) reg_factor = 2 # calculate max_train_steps max_train_steps = int( math.ceil( float(total_steps) / int(train_batch_size) * int(epoch) * int(reg_factor) ) ) print(f'max_train_steps = {max_train_steps}') # calculate stop encoder training if stop_text_encoder_training_pct == None: stop_text_encoder_training = 0 else: stop_text_encoder_training = math.ceil( float(max_train_steps) / 100 * int(stop_text_encoder_training_pct) ) print(f'stop_text_encoder_training = {stop_text_encoder_training}') lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100)) print(f'lr_warmup_steps = {lr_warmup_steps}') run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} "train_db_fixed.py"' if v2: run_cmd += ' --v2' if v_parameterization: run_cmd += ' --v_parameterization' if cache_latent: run_cmd += ' --cache_latents' if enable_bucket: run_cmd += ' --enable_bucket' if gradient_checkpointing: run_cmd += ' --gradient_checkpointing' if full_fp16: run_cmd += ' --full_fp16' if no_token_padding: run_cmd += ' --no_token_padding' if use_8bit_adam: run_cmd += ' --use_8bit_adam' if xformers: run_cmd += ' --xformers' if shuffle_caption: run_cmd += ' --shuffle_caption' if save_state: run_cmd += ' --save_state' run_cmd += ( f' --pretrained_model_name_or_path={pretrained_model_name_or_path}' ) run_cmd += f' --train_data_dir="{train_data_dir}"' if len(reg_data_dir): run_cmd += f' --reg_data_dir="{reg_data_dir}"' run_cmd += f' --resolution={max_resolution}' run_cmd += f' --output_dir={output_dir}' run_cmd += f' --train_batch_size={train_batch_size}' run_cmd += f' --learning_rate={learning_rate}' run_cmd += f' --lr_scheduler={lr_scheduler}' run_cmd += f' --lr_warmup_steps={lr_warmup_steps}' run_cmd += f' --max_train_steps={max_train_steps}' run_cmd += f' --use_8bit_adam' run_cmd += f' --xformers' run_cmd += f' --mixed_precision={mixed_precision}' run_cmd += f' --save_every_n_epochs={save_every_n_epochs}' run_cmd += f' --seed={seed}' run_cmd += f' --save_precision={save_precision}' run_cmd += f' --logging_dir={logging_dir}' run_cmd += f' --caption_extention={caption_extention}' if not stop_text_encoder_training == 0: run_cmd += ( f' --stop_text_encoder_training={stop_text_encoder_training}' ) if not save_model_as == 'same as source model': run_cmd += f' --save_model_as={save_model_as}' if not resume == '': run_cmd += f' --resume={resume}' if not float(prior_loss_weight) == 1.0: run_cmd += f' --prior_loss_weight={prior_loss_weight}' print(run_cmd) # Run the command subprocess.run(run_cmd) # check if output_dir/last is a folder... therefore it is a diffuser model last_dir = pathlib.Path(f'{output_dir}/last') if not last_dir.is_dir(): # Copy inference model for v2 if required save_inference_file(output_dir, v2, v_parameterization) def set_pretrained_model_name_or_path_input(value, v2, v_parameterization): # define a list of substrings to search for substrings_v2 = [ 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', ] # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list if str(value) in substrings_v2: print('SD v2 model detected. 
Setting --v2 parameter') v2 = True v_parameterization = False return value, v2, v_parameterization # define a list of substrings to search for v-objective substrings_v_parameterization = [ 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', ] # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list if str(value) in substrings_v_parameterization: print( 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization' ) v2 = True v_parameterization = True return value, v2, v_parameterization # define a list of substrings to v1.x substrings_v1_model = [ 'CompVis/stable-diffusion-v1-4', 'runwayml/stable-diffusion-v1-5', ] if str(value) in substrings_v1_model: v2 = False v_parameterization = False return value, v2, v_parameterization if value == 'custom': value = '' v2 = False v_parameterization = False return value, v2, v_parameterization css = '' if os.path.exists('./style.css'): with open(os.path.join('./style.css'), 'r', encoding='utf8') as file: print('Load CSS...') css += file.read() + '\n' interface = gr.Blocks(css=css) with interface: dummy_true = gr.Label(value=True, visible=False) dummy_false = gr.Label(value=False, visible=False) with gr.Tab('Dreambooth'): gr.Markdown('Enter kohya finetuner parameter using this interface.') with gr.Accordion('Configuration file', open=False): with gr.Row(): button_open_config = gr.Button('Open 📂', elem_id='open_folder') button_save_config = gr.Button('Save 💾', elem_id='open_folder') button_save_as_config = gr.Button( 'Save as... 💾', elem_id='open_folder' ) config_file_name = gr.Textbox( label='', placeholder="type the configuration file path or use the 'Open' button above to select it...", interactive=True, ) # config_file_name.change( # remove_doublequote, # inputs=[config_file_name], # outputs=[config_file_name], # ) with gr.Tab('Source model'): # Define the input elements with gr.Row(): pretrained_model_name_or_path_input = gr.Textbox( label='Pretrained model name or path', placeholder='enter the path to custom model or name of pretrained model', ) pretrained_model_name_or_path_fille = gr.Button( document_symbol, elem_id='open_folder_small' ) pretrained_model_name_or_path_fille.click( get_file_path, inputs=[pretrained_model_name_or_path_input], outputs=pretrained_model_name_or_path_input, ) pretrained_model_name_or_path_folder = gr.Button( folder_symbol, elem_id='open_folder_small' ) pretrained_model_name_or_path_folder.click( get_folder_path, outputs=pretrained_model_name_or_path_input, ) model_list = gr.Dropdown( label='(Optional) Model Quick Pick', choices=[ 'custom', 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ], ) save_model_as_dropdown = gr.Dropdown( label='Save trained model as', choices=[ 'same as source model', 'ckpt', 'diffusers', 'diffusers_safetensors', 'safetensors', ], value='same as source model', ) with gr.Row(): v2_input = gr.Checkbox(label='v2', value=True) v_parameterization_input = gr.Checkbox( label='v_parameterization', value=False ) pretrained_model_name_or_path_input.change( remove_doublequote, inputs=[pretrained_model_name_or_path_input], outputs=[pretrained_model_name_or_path_input], ) model_list.change( set_pretrained_model_name_or_path_input, inputs=[model_list, v2_input, v_parameterization_input], outputs=[ 
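                    # The outputs below are filled positionally from the
                    # (value, v2, v_parameterization) tuple returned by
                    # set_pretrained_model_name_or_path_input() above.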
pretrained_model_name_or_path_input, v2_input, v_parameterization_input, ], ) with gr.Tab('Directories'): with gr.Row(): train_data_dir_input = gr.Textbox( label='Image folder', placeholder='Folder where the training folders containing the images are located', ) train_data_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) train_data_dir_input_folder.click( get_folder_path, outputs=train_data_dir_input ) reg_data_dir_input = gr.Textbox( label='Regularisation folder', placeholder='(Optional) Folder where where the regularization folders containing the images are located', ) reg_data_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) reg_data_dir_input_folder.click( get_folder_path, outputs=reg_data_dir_input ) with gr.Row(): output_dir_input = gr.Textbox( label='Output folder', placeholder='Folder to output trained model', ) output_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) output_dir_input_folder.click( get_folder_path, outputs=output_dir_input ) logging_dir_input = gr.Textbox( label='Logging folder', placeholder='Optional: enable logging and output TensorBoard log to this folder', ) logging_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) logging_dir_input_folder.click( get_folder_path, outputs=logging_dir_input ) train_data_dir_input.change( remove_doublequote, inputs=[train_data_dir_input], outputs=[train_data_dir_input], ) reg_data_dir_input.change( remove_doublequote, inputs=[reg_data_dir_input], outputs=[reg_data_dir_input], ) output_dir_input.change( remove_doublequote, inputs=[output_dir_input], outputs=[output_dir_input], ) logging_dir_input.change( remove_doublequote, inputs=[logging_dir_input], outputs=[logging_dir_input], ) with gr.Tab('Training parameters'): with gr.Row(): learning_rate_input = gr.Textbox( label='Learning rate', value=1e-6 ) lr_scheduler_input = gr.Dropdown( label='LR Scheduler', choices=[ 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'linear', 'polynomial', ], value='constant', ) lr_warmup_input = gr.Textbox(label='LR warmup', value=0) with gr.Row(): train_batch_size_input = gr.Slider( minimum=1, maximum=32, label='Train batch size', value=1, step=1, ) epoch_input = gr.Textbox(label='Epoch', value=1) save_every_n_epochs_input = gr.Textbox( label='Save every N epochs', value=1 ) with gr.Row(): mixed_precision_input = gr.Dropdown( label='Mixed precision', choices=[ 'no', 'fp16', 'bf16', ], value='fp16', ) save_precision_input = gr.Dropdown( label='Save precision', choices=[ 'float', 'fp16', 'bf16', ], value='fp16', ) num_cpu_threads_per_process_input = gr.Slider( minimum=1, maximum=os.cpu_count(), step=1, label='Number of CPU threads per process', value=os.cpu_count(), ) with gr.Row(): seed_input = gr.Textbox(label='Seed', value=1234) max_resolution_input = gr.Textbox( label='Max resolution', value='512,512', placeholder='512,512', ) with gr.Row(): caption_extention_input = gr.Textbox( label='Caption Extension', placeholder='(Optional) Extension for caption files. 
default: .caption', ) stop_text_encoder_training_input = gr.Slider( minimum=0, maximum=100, value=0, step=1, label='Stop text encoder training', ) with gr.Row(): enable_bucket_input = gr.Checkbox( label='Enable buckets', value=True ) cache_latent_input = gr.Checkbox( label='Cache latent', value=True ) use_8bit_adam_input = gr.Checkbox( label='Use 8bit adam', value=True ) xformers_input = gr.Checkbox(label='Use xformers', value=True) with gr.Accordion('Advanced Configuration', open=False): with gr.Row(): full_fp16_input = gr.Checkbox( label='Full fp16 training (experimental)', value=False ) no_token_padding_input = gr.Checkbox( label='No token padding', value=False ) gradient_checkpointing_input = gr.Checkbox( label='Gradient checkpointing', value=False ) shuffle_caption = gr.Checkbox( label='Shuffle caption', value=False ) save_state = gr.Checkbox(label='Save state', value=False) with gr.Row(): resume = gr.Textbox( label='Resume', placeholder='path to "last-state" state folder to resume from', ) resume_button = gr.Button('📂', elem_id='open_folder_small') resume_button.click(get_folder_path, outputs=resume) prior_loss_weight = gr.Number( label='Prior loss weight', value=1.0 ) button_run = gr.Button('Train model') with gr.Tab('Utilities'): with gr.Tab('Captioning'): gradio_basic_caption_gui_tab() gradio_blip_caption_gui_tab() gradio_wd14_caption_gui_tab() gradio_dreambooth_folder_creation_tab( train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ) gradio_dataset_balancing_tab() gradio_convert_model_tab() button_open_config.click( open_configuration, inputs=[ config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[ config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], ) save_as = True not_save_as = False button_save_config.click( save_configuration, inputs=[ dummy_false, config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, 
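            # Note: this inputs list is passed positionally to
            # save_configuration(), so its order must mirror that function's
            # parameters (dummy_false supplies the save_as flag).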
caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[config_file_name], ) button_save_as_config.click( save_configuration, inputs=[ dummy_true, config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[config_file_name], ) button_run.click( train_model, inputs=[ pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], ) # Show the interface interface.launch() finetune_gui.py METASEP
[ { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the 
values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n 
print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(", "type": "infile" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 
gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', 
image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if 
v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n 
image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n 
save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n 
]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': 
image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n 
# Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... 
therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... 
{save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom 
library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n 
my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' 
--v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(\n get_folder_path, outputs=logging_dir_input\n )\n train_dir_input.change(\n remove_doublequote,\n inputs=[train_dir_input],\n outputs=[train_dir_input],\n )\n image_folder_input.change(\n remove_doublequote,\n inputs=[image_folder_input],\n outputs=[image_folder_input],\n )\n output_dir_input.change(\n remove_doublequote,\n inputs=[output_dir_input],\n outputs=[output_dir_input],\n )\n with gr.Tab('Training parameters'):\n with gr.Row():\n learning_rate_input = gr.Textbox(\n label='Learning rate', value=1e-6\n )\n lr_scheduler_input = gr.Dropdown(\n label='LR Scheduler',\n choices=[\n 'constant',\n 'constant_with_warmup',\n 'cosine',\n 'cosine_with_restarts',\n 'linear',\n 'polynomial',\n ],\n value='constant',\n )\n lr_warmup_input = gr.Textbox(label='LR warmup', value=0)\n with gr.Row():\n dataset_repeats_input = gr.Textbox(\n label='Dataset repeats', value=40\n )\n train_batch_size_input = gr.Slider(\n minimum=1,\n maximum=32,\n label='Train batch size',\n value=1,\n step=1,\n )\n epoch_input = gr.Textbox(label='Epoch', value=1)\n save_every_n_epochs_input = gr.Textbox(\n label='Save every N epochs', value=1\n )\n with gr.Row():\n mixed_precision_input = gr.Dropdown(\n label='Mixed precision',\n choices=[\n 'no',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n save_precision_input = gr.Dropdown(\n label='Save precision',\n choices=[\n 'float',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n num_cpu_threads_per_process_input = gr.Slider(\n minimum=1,\n maximum=os.cpu_count(),\n step=1,\n label='Number of CPU threads per process',\n value=os.cpu_count(),\n )\n with gr.Row():\n seed_input = gr.Textbox(label='Seed', value=1234)\n max_resolution_input = gr.Textbox(\n label='Max resolution', value='512,512'\n )\n with gr.Row():\n caption_extention_input = gr.Textbox(\n label='Caption Extension',\n placeholder='(Optional) Extension for caption files. 
default: .txt',\n )\n train_text_encoder_input = gr.Checkbox(\n label='Train text encoder', value=True\n )\n with gr.Box():\n with gr.Row():\n create_caption = gr.Checkbox(\n label='Generate caption database', value=True\n )\n create_buckets = gr.Checkbox(\n label='Generate image buckets', value=True\n )\n train = gr.Checkbox(label='Train model', value=True)\n\n button_run = gr.Button('Run')\n\n button_run.click(\n train_model,\n inputs=[\n create_caption,\n create_buckets,\n train,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_open_config.click(\n open_config_file,\n inputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_save_config.click(\n save_configuration,\n inputs=[\n dummy_false,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[config_file_name],\n )\n\n button_save_as_config.click(\n save_configuration,\n inputs=[\n dummy_true,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n 
train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[config_file_name],\n )\n\n with gr.Tab('Utilities'):\n gradio_basic_caption_gui_tab()\n gradio_blip_caption_gui_tab()", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and 
file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in 
os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(\n get_folder_path, outputs=logging_dir_input\n )\n train_dir_input.change(\n remove_doublequote,\n inputs=[train_dir_input],\n outputs=[train_dir_input],\n )\n image_folder_input.change(\n remove_doublequote,\n inputs=[image_folder_input],\n outputs=[image_folder_input],\n )\n output_dir_input.change(\n remove_doublequote,\n inputs=[output_dir_input],\n outputs=[output_dir_input],\n )\n with gr.Tab('Training parameters'):\n with gr.Row():\n learning_rate_input = gr.Textbox(\n label='Learning rate', value=1e-6\n )\n lr_scheduler_input = gr.Dropdown(\n label='LR Scheduler',\n choices=[\n 'constant',\n 'constant_with_warmup',\n 'cosine',\n 'cosine_with_restarts',\n 'linear',\n 'polynomial',\n ],\n value='constant',\n )\n lr_warmup_input = gr.Textbox(label='LR warmup', value=0)\n with gr.Row():\n dataset_repeats_input = gr.Textbox(\n label='Dataset repeats', value=40\n )\n train_batch_size_input = gr.Slider(\n minimum=1,\n maximum=32,\n label='Train batch size',\n value=1,\n step=1,\n )\n epoch_input = gr.Textbox(label='Epoch', value=1)\n save_every_n_epochs_input = gr.Textbox(\n label='Save every N epochs', value=1\n )\n with gr.Row():\n mixed_precision_input = gr.Dropdown(\n label='Mixed precision',\n choices=[\n 'no',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n save_precision_input = gr.Dropdown(\n label='Save precision',\n choices=[\n 'float',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n num_cpu_threads_per_process_input = gr.Slider(\n minimum=1,\n maximum=os.cpu_count(),\n step=1,\n label='Number of CPU threads per process',\n value=os.cpu_count(),\n )\n with gr.Row():\n seed_input = gr.Textbox(label='Seed', value=1234)\n max_resolution_input = gr.Textbox(\n label='Max resolution', value='512,512'\n )\n with gr.Row():\n caption_extention_input = gr.Textbox(\n label='Caption Extension',\n placeholder='(Optional) Extension for caption files. 
default: .txt',\n )\n train_text_encoder_input = gr.Checkbox(\n label='Train text encoder', value=True\n )\n with gr.Box():\n with gr.Row():\n create_caption = gr.Checkbox(\n label='Generate caption database', value=True\n )\n create_buckets = gr.Checkbox(\n label='Generate image buckets', value=True\n )\n train = gr.Checkbox(label='Train model', value=True)\n\n button_run = gr.Button('Run')\n\n button_run.click(\n train_model,\n inputs=[\n create_caption,\n create_buckets,\n train,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_open_config.click(\n open_config_file,\n inputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_save_config.click(\n save_configuration,\n inputs=[\n dummy_false,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[config_file_name],\n )\n\n button_save_as_config.click(\n save_configuration,\n inputs=[\n dummy_true,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n 
train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[config_file_name],\n )\n\n with gr.Tab('Utilities'):\n gradio_basic_caption_gui_tab()\n gradio_blip_caption_gui_tab()\n gradio_wd14_caption_gui_tab()", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n 
if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in 
os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(", "type": "inproject" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n 
train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != 
None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 
'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': 
mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': 
lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 
'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or 
import gradio as gr
import json
import math
import os
import subprocess
import pathlib
import shutil

# from easygui import fileopenbox, filesavebox, diropenbox, msgbox
from library.basic_caption_gui import gradio_basic_caption_gui_tab
from library.convert_model_gui import gradio_convert_model_tab
from library.blip_caption_gui import gradio_blip_caption_gui_tab
from library.wd14_caption_gui import gradio_wd14_caption_gui_tab
from library.common_gui import (
    get_folder_path,
    get_file_path,
    get_saveasfile_path,
)

folder_symbol = '\U0001f4c2'  # 📂
refresh_symbol = '\U0001f504'  # 🔄
save_style_symbol = '\U0001f4be'  # 💾
document_symbol = '\U0001F4C4'  # 📄


def save_configuration(
    save_as,
    file_path,
    pretrained_model_name_or_path,
    v2,
    v_parameterization,
    train_dir,
    image_folder,
    output_dir,
    logging_dir,
    max_resolution,
    learning_rate,
    lr_scheduler,
    lr_warmup,
    dataset_repeats,
    train_batch_size,
    epoch,
    save_every_n_epochs,
    mixed_precision,
    save_precision,
    seed,
    num_cpu_threads_per_process,
    train_text_encoder,
    create_buckets,
    create_caption,
    train,
    save_model_as,
    caption_extension,
):
    original_file_path = file_path

    save_as_bool = True if save_as.get('label') == 'True' else False

    if save_as_bool:
        print('Save as...')
        file_path = get_saveasfile_path(file_path)
    else:
        print('Save...')
        if file_path == None or file_path == '':
            file_path = get_saveasfile_path(file_path)

    # print(file_path)

    if file_path == None:
        return original_file_path

    # Return the values of the variables as a dictionary
    variables = {
        'pretrained_model_name_or_path': pretrained_model_name_or_path,
        'v2': v2,
        'v_parameterization': v_parameterization,
        'train_dir': train_dir,
        'image_folder': image_folder,
        'output_dir': output_dir,
        'logging_dir': logging_dir,
        'max_resolution': max_resolution,
        'learning_rate': learning_rate,
        'lr_scheduler': lr_scheduler,
        'lr_warmup': lr_warmup,
        'dataset_repeats': dataset_repeats,
        'train_batch_size': train_batch_size,
        'epoch': epoch,
        'save_every_n_epochs': save_every_n_epochs,
        'mixed_precision': mixed_precision,
        'save_precision': save_precision,
        'seed': seed,
        'num_cpu_threads_per_process': num_cpu_threads_per_process,
        'train_text_encoder': train_text_encoder,
        'create_buckets': create_buckets,
        'create_caption': create_caption,
        'train': train,
        'save_model_as': save_model_as,
        'caption_extension': caption_extension,
    }

    # Save the data to the selected file
    # with open(file_path, 'w') as file:
    #     json.dump(variables, file)
    # msgbox('File was saved...')

    return file_path


def open_config_file(
    file_path,
    pretrained_model_name_or_path,
    v2,
    v_parameterization,
    train_dir,
    image_folder,
    output_dir,
    logging_dir,
    max_resolution,
    learning_rate,
    lr_scheduler,
    lr_warmup,
    dataset_repeats,
    train_batch_size,
    epoch,
    save_every_n_epochs,
    mixed_precision,
    save_precision,
    seed,
    num_cpu_threads_per_process,
    train_text_encoder,
    create_buckets,
    create_caption,
    train,
    save_model_as,
    caption_extension,
):
    original_file_path = file_path
    file_path = get_file_path(file_path)

    if file_path != '' and file_path != None:
        print(file_path)
        # load variables from JSON file
        with open(file_path, 'r') as f:
            my_data = json.load(f)
    else:
        file_path = original_file_path  # In case a file_path was provided and the user decide to cancel the open action
        my_data = {}

    # Return the values of the variables as a dictionary
    return (
        file_path,
        my_data.get(
            'pretrained_model_name_or_path', pretrained_model_name_or_path
        ),
        my_data.get('v2', v2),
        my_data.get('v_parameterization', v_parameterization),
        my_data.get('train_dir', train_dir),
        my_data.get('image_folder', image_folder),
        my_data.get('output_dir', output_dir),
        my_data.get('logging_dir', logging_dir),
        my_data.get('max_resolution', max_resolution),
        my_data.get('learning_rate', learning_rate),
        my_data.get('lr_scheduler', lr_scheduler),
        my_data.get('lr_warmup', lr_warmup),
        my_data.get('dataset_repeats', dataset_repeats),
        my_data.get('train_batch_size', train_batch_size),
        my_data.get('epoch', epoch),
        my_data.get('save_every_n_epochs', save_every_n_epochs),
        my_data.get('mixed_precision', mixed_precision),
        my_data.get('save_precision', save_precision),
        my_data.get('seed', seed),
        my_data.get(
            'num_cpu_threads_per_process', num_cpu_threads_per_process
        ),
        my_data.get('train_text_encoder', train_text_encoder),
        my_data.get('create_buckets', create_buckets),
        my_data.get('create_caption', create_caption),
        my_data.get('train', train),
        my_data.get('save_model_as', save_model_as),
        my_data.get('caption_extension', caption_extension),
    )
and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n", "type": "common" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n 
pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', 
save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n 
output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n 
my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' 
--dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n 
learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 
'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n 
original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the 
command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. 
Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... {save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n 
train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(\n get_folder_path, outputs=logging_dir_input\n )\n train_dir_input.change(\n remove_doublequote,\n inputs=[train_dir_input],\n outputs=[train_dir_input],\n )\n image_folder_input.change(\n remove_doublequote,\n inputs=[image_folder_input],\n outputs=[image_folder_input],\n )\n output_dir_input.change(\n remove_doublequote,\n inputs=[output_dir_input],\n outputs=[output_dir_input],\n )\n with gr.Tab('Training parameters'):\n with gr.Row():\n learning_rate_input = gr.Textbox(\n label='Learning rate', value=1e-6\n )\n lr_scheduler_input = gr.Dropdown(\n label='LR Scheduler',\n choices=[\n 'constant',\n 'constant_with_warmup',\n 'cosine',\n 'cosine_with_restarts',\n 'linear',\n 'polynomial',\n ],\n value='constant',\n )\n lr_warmup_input = gr.Textbox(label='LR warmup', value=0)\n with gr.Row():\n dataset_repeats_input = gr.Textbox(\n label='Dataset repeats', value=40\n )\n train_batch_size_input = gr.Slider(\n minimum=1,\n maximum=32,\n label='Train batch size',\n value=1,\n step=1,\n )\n epoch_input = gr.Textbox(label='Epoch', value=1)\n save_every_n_epochs_input = gr.Textbox(\n label='Save every N epochs', value=1\n )\n with gr.Row():\n mixed_precision_input = gr.Dropdown(\n label='Mixed precision',\n choices=[\n 'no',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n save_precision_input = gr.Dropdown(\n label='Save precision',\n choices=[\n 'float',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n num_cpu_threads_per_process_input = gr.Slider(\n minimum=1,\n maximum=os.cpu_count(),\n step=1,\n label='Number of CPU threads per process',\n value=os.cpu_count(),\n )\n with gr.Row():\n seed_input = gr.Textbox(label='Seed', value=1234)\n max_resolution_input = gr.Textbox(\n label='Max resolution', value='512,512'\n )\n with gr.Row():\n caption_extention_input = gr.Textbox(\n label='Caption Extension',\n placeholder='(Optional) Extension for caption files. 
default: .txt',\n )\n train_text_encoder_input = gr.Checkbox(\n label='Train text encoder', value=True\n )\n with gr.Box():\n with gr.Row():\n create_caption = gr.Checkbox(\n label='Generate caption database', value=True\n )\n create_buckets = gr.Checkbox(\n label='Generate image buckets', value=True\n )", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = 
get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 
'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,", "type": "commited" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n 
caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:", "type": "non_informative" }, { "content": "import gradio as gr\nimport json", "type": "non_informative" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # 
load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n 
print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... 
therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]", "type": "non_informative" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n 
train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n 
max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n", "type": "non_informative" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,", "type": "random" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = 
get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n 
train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:", "type": "random" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 
'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n 
seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... 
therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... 
{save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )", "type": "random" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n 
lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n 
my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' 
--learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... 
{save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(\n get_folder_path, outputs=logging_dir_input\n )\n train_dir_input.change(\n remove_doublequote,\n inputs=[train_dir_input],\n outputs=[train_dir_input],\n )\n image_folder_input.change(\n remove_doublequote,\n inputs=[image_folder_input],\n outputs=[image_folder_input],\n )\n output_dir_input.change(\n remove_doublequote,\n inputs=[output_dir_input],\n 
outputs=[output_dir_input],\n )\n with gr.Tab('Training parameters'):\n with gr.Row():\n learning_rate_input = gr.Textbox(\n label='Learning rate', value=1e-6\n )\n lr_scheduler_input = gr.Dropdown(\n label='LR Scheduler',\n choices=[\n 'constant',\n 'constant_with_warmup',\n 'cosine',\n 'cosine_with_restarts',\n 'linear',\n 'polynomial',\n ],\n value='constant',\n )\n lr_warmup_input = gr.Textbox(label='LR warmup', value=0)\n with gr.Row():\n dataset_repeats_input = gr.Textbox(\n label='Dataset repeats', value=40\n )\n train_batch_size_input = gr.Slider(\n minimum=1,\n maximum=32,\n label='Train batch size',\n value=1,\n step=1,\n )\n epoch_input = gr.Textbox(label='Epoch', value=1)\n save_every_n_epochs_input = gr.Textbox(\n label='Save every N epochs', value=1\n )\n with gr.Row():\n mixed_precision_input = gr.Dropdown(\n label='Mixed precision',\n choices=[\n 'no',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n save_precision_input = gr.Dropdown(\n label='Save precision',\n choices=[\n 'float',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n num_cpu_threads_per_process_input = gr.Slider(\n minimum=1,\n maximum=os.cpu_count(),\n step=1,\n label='Number of CPU threads per process',\n value=os.cpu_count(),\n )\n with gr.Row():\n seed_input = gr.Textbox(label='Seed', value=1234)\n max_resolution_input = gr.Textbox(\n label='Max resolution', value='512,512'\n )\n with gr.Row():\n caption_extention_input = gr.Textbox(\n label='Caption Extension',\n placeholder='(Optional) Extension for caption files. default: .txt',\n )\n train_text_encoder_input = gr.Checkbox(\n label='Train text encoder', value=True\n )\n with gr.Box():\n with gr.Row():\n create_caption = gr.Checkbox(\n label='Generate caption database', value=True\n )\n create_buckets = gr.Checkbox(\n label='Generate image buckets', value=True\n )\n train = gr.Checkbox(label='Train model', value=True)\n\n button_run = gr.Button('Run')\n\n button_run.click(\n train_model,\n inputs=[\n create_caption,\n create_buckets,\n train,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_open_config.click(\n open_config_file,\n inputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n 
save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_save_config.click(\n save_configuration,\n inputs=[\n dummy_false,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[config_file_name],\n )\n\n button_save_as_config.click(\n save_configuration,\n inputs=[\n dummy_true,\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,", "type": "random" }, { "content": "import gradio as gr\nimport json\nimport math\nimport os\nimport subprocess\nimport pathlib\nimport shutil\n\n# from easygui import fileopenbox, filesavebox, diropenbox, msgbox\nfrom library.basic_caption_gui import gradio_basic_caption_gui_tab\nfrom library.convert_model_gui import gradio_convert_model_tab\nfrom library.blip_caption_gui import gradio_blip_caption_gui_tab\nfrom library.wd14_caption_gui import gradio_wd14_caption_gui_tab\nfrom library.common_gui import (\n get_folder_path,\n get_file_path,\n get_saveasfile_path,\n)\n\nfolder_symbol = '\\U0001f4c2' # 📂\nrefresh_symbol = '\\U0001f504' # 🔄\nsave_style_symbol = '\\U0001f4be' # 💾\ndocument_symbol = '\\U0001F4C4' # 📄\n\n\ndef save_configuration(\n save_as,\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n\n save_as_bool = True if save_as.get('label') == 'True' else False\n\n if save_as_bool:\n print('Save as...')\n file_path = get_saveasfile_path(file_path)\n else:\n print('Save...')\n if file_path == None or file_path == '':\n file_path = get_saveasfile_path(file_path)\n\n # print(file_path)\n\n if file_path == None:\n return original_file_path\n\n # Return the values of the variables as a dictionary\n variables = {\n 'pretrained_model_name_or_path': pretrained_model_name_or_path,\n 'v2': v2,\n 'v_parameterization': v_parameterization,\n 'train_dir': train_dir,\n 'image_folder': image_folder,\n 'output_dir': output_dir,\n 'logging_dir': logging_dir,\n 'max_resolution': max_resolution,\n 'learning_rate': learning_rate,\n 'lr_scheduler': lr_scheduler,\n 'lr_warmup': lr_warmup,\n 'dataset_repeats': dataset_repeats,\n 'train_batch_size': train_batch_size,\n 'epoch': epoch,\n 'save_every_n_epochs': save_every_n_epochs,\n 'mixed_precision': mixed_precision,\n 'save_precision': 
save_precision,\n 'seed': seed,\n 'num_cpu_threads_per_process': num_cpu_threads_per_process,\n 'train_text_encoder': train_text_encoder,\n 'create_buckets': create_buckets,\n 'create_caption': create_caption,\n 'train': train,\n 'save_model_as': save_model_as,\n 'caption_extension': caption_extension,\n }\n\n # Save the data to the selected file\n # with open(file_path, 'w') as file:\n # json.dump(variables, file)\n # msgbox('File was saved...')\n\n return file_path\n\n\ndef open_config_file(\n file_path,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n create_buckets,\n create_caption,\n train,\n save_model_as,\n caption_extension,\n):\n original_file_path = file_path\n file_path = get_file_path(file_path)\n\n if file_path != '' and file_path != None:\n print(file_path)\n # load variables from JSON file\n with open(file_path, 'r') as f:\n my_data = json.load(f)\n else:\n file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action\n my_data = {}\n\n # Return the values of the variables as a dictionary\n return (\n file_path,\n my_data.get(\n 'pretrained_model_name_or_path', pretrained_model_name_or_path\n ),\n my_data.get('v2', v2),\n my_data.get('v_parameterization', v_parameterization),\n my_data.get('train_dir', train_dir),\n my_data.get('image_folder', image_folder),\n my_data.get('output_dir', output_dir),\n my_data.get('logging_dir', logging_dir),\n my_data.get('max_resolution', max_resolution),\n my_data.get('learning_rate', learning_rate),\n my_data.get('lr_scheduler', lr_scheduler),\n my_data.get('lr_warmup', lr_warmup),\n my_data.get('dataset_repeats', dataset_repeats),\n my_data.get('train_batch_size', train_batch_size),\n my_data.get('epoch', epoch),\n my_data.get('save_every_n_epochs', save_every_n_epochs),\n my_data.get('mixed_precision', mixed_precision),\n my_data.get('save_precision', save_precision),\n my_data.get('seed', seed),\n my_data.get(\n 'num_cpu_threads_per_process', num_cpu_threads_per_process\n ),\n my_data.get('train_text_encoder', train_text_encoder),\n my_data.get('create_buckets', create_buckets),\n my_data.get('create_caption', create_caption),\n my_data.get('train', train),\n my_data.get('save_model_as', save_model_as),\n my_data.get('caption_extension', caption_extension),\n )\n\n\ndef train_model(\n generate_caption_database,\n generate_image_buckets,\n train,\n pretrained_model_name_or_path,\n v2,\n v_parameterization,\n train_dir,\n image_folder,\n output_dir,\n logging_dir,\n max_resolution,\n learning_rate,\n lr_scheduler,\n lr_warmup,\n dataset_repeats,\n train_batch_size,\n epoch,\n save_every_n_epochs,\n mixed_precision,\n save_precision,\n seed,\n num_cpu_threads_per_process,\n train_text_encoder,\n save_model_as,\n caption_extension,\n):\n def save_inference_file(output_dir, v2, v_parameterization):\n # Copy inference model for v2 if required\n if v2 and v_parameterization:\n print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference-v.yaml',\n f'{output_dir}/last.yaml',\n )\n elif v2:\n print(f'Saving v2-inference.yaml as {output_dir}/last.yaml')\n shutil.copy(\n f'./v2_inference/v2-inference.yaml',\n f'{output_dir}/last.yaml',\n )\n\n # 
create caption json file\n if generate_caption_database:\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n\n run_cmd = (\n f'./venv/Scripts/python.exe finetune/merge_captions_to_metadata.py'\n )\n if caption_extension == '':\n run_cmd += f' --caption_extension=\".txt\"'\n else:\n run_cmd += f' --caption_extension={caption_extension}'\n run_cmd += f' {image_folder}'\n run_cmd += f' {train_dir}/meta_cap.json'\n run_cmd += f' --full_path'\n\n print(run_cmd)\n\n # Run the command\n subprocess.run(run_cmd)\n\n # create images buckets\n if generate_image_buckets:\n command = [\n './venv/Scripts/python.exe',\n 'finetune/prepare_buckets_latents.py',\n image_folder,\n '{}/meta_cap.json'.format(train_dir),\n '{}/meta_lat.json'.format(train_dir),\n pretrained_model_name_or_path,\n '--batch_size',\n '4',\n '--max_resolution',\n max_resolution,\n '--mixed_precision',\n mixed_precision,\n '--full_path',\n ]\n\n print(command)\n\n # Run the command\n subprocess.run(command)\n\n if train:\n image_num = len(\n [f for f in os.listdir(image_folder) if f.endswith('.npz')]\n )\n print(f'image_num = {image_num}')\n\n repeats = int(image_num) * int(dataset_repeats)\n print(f'repeats = {str(repeats)}')\n\n # calculate max_train_steps\n max_train_steps = int(\n math.ceil(float(repeats) / int(train_batch_size) * int(epoch))\n )\n print(f'max_train_steps = {max_train_steps}')\n\n lr_warmup_steps = round(\n float(int(lr_warmup) * int(max_train_steps) / 100)\n )\n print(f'lr_warmup_steps = {lr_warmup_steps}')\n\n run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} \"./fine_tune.py\"'\n if v2:\n run_cmd += ' --v2'\n if v_parameterization:\n run_cmd += ' --v_parameterization'\n if train_text_encoder:\n run_cmd += ' --train_text_encoder'\n run_cmd += (\n f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'\n )\n run_cmd += f' --in_json={train_dir}/meta_lat.json'\n run_cmd += f' --train_data_dir={image_folder}'\n run_cmd += f' --output_dir={output_dir}'\n if not logging_dir == '':\n run_cmd += f' --logging_dir={logging_dir}'\n run_cmd += f' --train_batch_size={train_batch_size}'\n run_cmd += f' --dataset_repeats={dataset_repeats}'\n run_cmd += f' --learning_rate={learning_rate}'\n run_cmd += f' --lr_scheduler={lr_scheduler}'\n run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'\n run_cmd += f' --max_train_steps={max_train_steps}'\n run_cmd += f' --use_8bit_adam'\n run_cmd += f' --xformers'\n run_cmd += f' --mixed_precision={mixed_precision}'\n run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'\n run_cmd += f' --seed={seed}'\n run_cmd += f' --save_precision={save_precision}'\n if not save_model_as == 'same as source model':\n run_cmd += f' --save_model_as={save_model_as}'\n\n print(run_cmd)\n # Run the command\n subprocess.run(run_cmd)\n\n # check if output_dir/last is a folder... therefore it is a diffuser model\n last_dir = pathlib.Path(f'{output_dir}/last')\n\n if not last_dir.is_dir():\n # Copy inference model for v2 if required\n save_inference_file(output_dir, v2, v_parameterization)\n\n\ndef set_pretrained_model_name_or_path_input(value, v2, v_parameterization):\n # define a list of substrings to search for\n substrings_v2 = [\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list\n if str(value) in substrings_v2:\n print('SD v2 model detected. 
Setting --v2 parameter')\n v2 = True\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n # define a list of substrings to search for v-objective\n substrings_v_parameterization = [\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n ]\n\n # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list\n if str(value) in substrings_v_parameterization:\n print(\n 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization'\n )\n v2 = True\n v_parameterization = True\n\n return value, v2, v_parameterization\n\n # define a list of substrings to v1.x\n substrings_v1_model = [\n 'CompVis/stable-diffusion-v1-4',\n 'runwayml/stable-diffusion-v1-5',\n ]\n\n if str(value) in substrings_v1_model:\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n if value == 'custom':\n value = ''\n v2 = False\n v_parameterization = False\n\n return value, v2, v_parameterization\n\n\ndef remove_doublequote(file_path):\n if file_path != None:\n file_path = file_path.replace('\"', '')\n\n return file_path\n\n\ncss = ''\n\nif os.path.exists('./style.css'):\n with open(os.path.join('./style.css'), 'r', encoding='utf8') as file:\n print('Load CSS...')\n css += file.read() + '\\n'\n\ninterface = gr.Blocks(css=css)\n\nwith interface:\n dummy_true = gr.Label(value=True, visible=False)\n dummy_false = gr.Label(value=False, visible=False)\n with gr.Tab('Finetuning'):\n gr.Markdown('Enter kohya finetuner parameter using this interface.')\n with gr.Accordion('Configuration File Load/Save', open=False):\n with gr.Row():\n button_open_config = gr.Button(\n f'Open {folder_symbol}', elem_id='open_folder'\n )\n button_save_config = gr.Button(\n f'Save {save_style_symbol}', elem_id='open_folder'\n )\n button_save_as_config = gr.Button(\n f'Save as... 
{save_style_symbol}', elem_id='open_folder'\n )\n config_file_name = gr.Textbox(\n label='', placeholder='type file path or use buttons...'\n )\n config_file_name.change(\n remove_doublequote,\n inputs=[config_file_name],\n outputs=[config_file_name],\n )\n with gr.Tab('Source model'):\n # Define the input elements\n with gr.Row():\n pretrained_model_name_or_path_input = gr.Textbox(\n label='Pretrained model name or path',\n placeholder='enter the path to custom model or name of pretrained model',\n )\n pretrained_model_name_or_path_file = gr.Button(\n document_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_file.click(\n get_file_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n pretrained_model_name_or_path_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n pretrained_model_name_or_path_folder.click(\n get_folder_path,\n inputs=pretrained_model_name_or_path_input,\n outputs=pretrained_model_name_or_path_input,\n )\n model_list = gr.Dropdown(\n label='(Optional) Model Quick Pick',\n choices=[\n 'custom',\n 'stabilityai/stable-diffusion-2-1-base',\n 'stabilityai/stable-diffusion-2-base',\n 'stabilityai/stable-diffusion-2-1',\n 'stabilityai/stable-diffusion-2',\n 'runwayml/stable-diffusion-v1-5',\n 'CompVis/stable-diffusion-v1-4',\n ],\n )\n save_model_as_dropdown = gr.Dropdown(\n label='Save trained model as',\n choices=[\n 'same as source model',\n 'ckpt',\n 'diffusers',\n 'diffusers_safetensors',\n 'safetensors',\n ],\n value='same as source model',\n )\n\n with gr.Row():\n v2_input = gr.Checkbox(label='v2', value=True)\n v_parameterization_input = gr.Checkbox(\n label='v_parameterization', value=False\n )\n model_list.change(\n set_pretrained_model_name_or_path_input,\n inputs=[model_list, v2_input, v_parameterization_input],\n outputs=[\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n ],\n )\n with gr.Tab('Directories'):\n with gr.Row():\n train_dir_input = gr.Textbox(\n label='Training config folder',\n placeholder='folder where the training configuration files will be saved',\n )\n train_dir_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n train_dir_folder.click(\n get_folder_path, outputs=train_dir_input\n )\n\n image_folder_input = gr.Textbox(\n label='Training Image folder',\n placeholder='folder where the training images are located',\n )\n image_folder_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n image_folder_input_folder.click(\n get_folder_path, outputs=image_folder_input\n )\n with gr.Row():\n output_dir_input = gr.Textbox(\n label='Output folder',\n placeholder='folder where the model will be saved',\n )\n output_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n output_dir_input_folder.click(\n get_folder_path, outputs=output_dir_input\n )\n\n logging_dir_input = gr.Textbox(\n label='Logging folder',\n placeholder='Optional: enable logging and output TensorBoard log to this folder',\n )\n logging_dir_input_folder = gr.Button(\n folder_symbol, elem_id='open_folder_small'\n )\n logging_dir_input_folder.click(\n get_folder_path, outputs=logging_dir_input\n )\n train_dir_input.change(\n remove_doublequote,\n inputs=[train_dir_input],\n outputs=[train_dir_input],\n )\n image_folder_input.change(\n remove_doublequote,\n inputs=[image_folder_input],\n outputs=[image_folder_input],\n )\n output_dir_input.change(\n remove_doublequote,\n inputs=[output_dir_input],\n 
outputs=[output_dir_input],\n )\n with gr.Tab('Training parameters'):\n with gr.Row():\n learning_rate_input = gr.Textbox(\n label='Learning rate', value=1e-6\n )\n lr_scheduler_input = gr.Dropdown(\n label='LR Scheduler',\n choices=[\n 'constant',\n 'constant_with_warmup',\n 'cosine',\n 'cosine_with_restarts',\n 'linear',\n 'polynomial',\n ],\n value='constant',\n )\n lr_warmup_input = gr.Textbox(label='LR warmup', value=0)\n with gr.Row():\n dataset_repeats_input = gr.Textbox(\n label='Dataset repeats', value=40\n )\n train_batch_size_input = gr.Slider(\n minimum=1,\n maximum=32,\n label='Train batch size',\n value=1,\n step=1,\n )\n epoch_input = gr.Textbox(label='Epoch', value=1)\n save_every_n_epochs_input = gr.Textbox(\n label='Save every N epochs', value=1\n )\n with gr.Row():\n mixed_precision_input = gr.Dropdown(\n label='Mixed precision',\n choices=[\n 'no',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n save_precision_input = gr.Dropdown(\n label='Save precision',\n choices=[\n 'float',\n 'fp16',\n 'bf16',\n ],\n value='fp16',\n )\n num_cpu_threads_per_process_input = gr.Slider(\n minimum=1,\n maximum=os.cpu_count(),\n step=1,\n label='Number of CPU threads per process',\n value=os.cpu_count(),\n )\n with gr.Row():\n seed_input = gr.Textbox(label='Seed', value=1234)\n max_resolution_input = gr.Textbox(\n label='Max resolution', value='512,512'\n )\n with gr.Row():\n caption_extention_input = gr.Textbox(\n label='Caption Extension',\n placeholder='(Optional) Extension for caption files. default: .txt',\n )\n train_text_encoder_input = gr.Checkbox(\n label='Train text encoder', value=True\n )\n with gr.Box():\n with gr.Row():\n create_caption = gr.Checkbox(\n label='Generate caption database', value=True\n )\n create_buckets = gr.Checkbox(\n label='Generate image buckets', value=True\n )\n train = gr.Checkbox(label='Train model', value=True)\n\n button_run = gr.Button('Run')\n\n button_run.click(\n train_model,\n inputs=[\n create_caption,\n create_buckets,\n train,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n )\n\n button_open_config.click(\n open_config_file,\n inputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,\n caption_extention_input,\n ],\n outputs=[\n config_file_name,\n pretrained_model_name_or_path_input,\n v2_input,\n v_parameterization_input,\n train_dir_input,\n image_folder_input,\n output_dir_input,\n logging_dir_input,\n max_resolution_input,\n learning_rate_input,\n lr_scheduler_input,\n lr_warmup_input,\n dataset_repeats_input,\n train_batch_size_input,\n epoch_input,\n 
save_every_n_epochs_input,\n mixed_precision_input,\n save_precision_input,\n seed_input,\n num_cpu_threads_per_process_input,\n train_text_encoder_input,\n create_buckets,\n create_caption,\n train,\n save_model_as_dropdown,", "type": "random" } ]
METASEP
52
bmaltais__kohya_ss
bmaltais__kohya_ss METASEP tools/prune.py METASEP import argparse import torch from tqdm import tqdm parser = argparse.ArgumentParser(description="Prune a model") parser.add_argument("model_prune", type=str, help="Path to model to prune") parser.add_argument("prune_output", type=str, help="Path to pruned ckpt output") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") args = parser.parse_args() print("Loading model...") model_prune = torch.load(args.model_prune) theta_prune = model_prune["state_dict"] theta = {} print("Pruning model...") for key in tqdm(theta_prune.keys(), desc="Pruning keys"): if "model" in key: theta.update({key: theta_prune[key]}) del theta_prune if args.half: print("Halving model...") state_dict = {k: v.half() for k, v in tqdm(theta.items(), desc="Halving weights")} else: state_dict = theta del theta print("Saving pruned model...") torch.save({"state_dict": state_dict}, args.prune_output) del state_dict print("Done pruning!") tools/convert_images_to_webp.py METASEP import argparse import glob import os from pathlib import Path from PIL import Image def main(): # Define the command-line arguments parser = argparse.ArgumentParser() parser.add_argument("directory", type=str, help="the directory containing the images to be converted") parser.add_argument("--in_ext", type=str, default="webp", help="the input file extension") parser.add_argument("--delete_originals", action="store_true", help="whether to delete the original files after conversion") # Parse the command-line arguments args = parser.parse_args() directory = args.directory in_ext = args.in_ext delete_originals = args.delete_originals # Set the output file extension to .webp out_ext = "webp" # Create the file pattern string using the input file extension file_pattern = f"*.{in_ext}" # Get the list of files in the directory that match the file pattern files = glob.glob(os.path.join(directory, file_pattern)) # Iterate over the list of files for file in files: # Open the image file img = Image.open(file) # Create a new file path with the output file extension new_path = Path(file).with_suffix(f".{out_ext}") print(new_path) # Check if the output file already exists if new_path.exists(): # Skip the conversion if the output file already exists print(f"Skipping {file} because {new_path} already exists") continue # Save the image to the new file as lossless img.save(new_path, lossless=True) # Optionally, delete the original file if delete_originals: os.remove(file) if __name__ == "__main__": main() tools/convert_images_to_hq_jpg.py METASEP import argparse import glob import os from pathlib import Path from PIL import Image def main(): # Define the command-line arguments parser = argparse.ArgumentParser() parser.add_argument("directory", type=str, help="the directory containing the images to be converted") parser.add_argument("--in_ext", type=str, default="webp", help="the input file extension") parser.add_argument("--quality", type=int, default=95, help="the JPEG quality (0-100)") parser.add_argument("--delete_originals", action="store_true", help="whether to delete the original files after conversion") # Parse the command-line arguments args = parser.parse_args() directory = args.directory in_ext = args.in_ext out_ext = "jpg" quality = args.quality delete_originals = args.delete_originals # Create the file pattern string using the input file extension file_pattern = f"*.{in_ext}" # Get the list of files in the directory that match the file pattern files = 
glob.glob(os.path.join(directory, file_pattern)) # Iterate over the list of files for file in files: # Open the image file img = Image.open(file) # Create a new file path with the output file extension new_path = Path(file).with_suffix(f".{out_ext}") # Check if the output file already exists if new_path.exists(): # Skip the conversion if the output file already exists print(f"Skipping {file} because {new_path} already exists") continue # Save the image to the new file as high-quality JPEG img.save(new_path, quality=quality, optimize=True) # Optionally, delete the original file if delete_originals: os.remove(file) if __name__ == "__main__": main() tools/convert_diffusers20_original_sd.py METASEP # convert Diffusers v1.x/v2.0 model to original Stable Diffusion # v1: initial version # v2: support safetensors # v3: fix to support another format # v4: support safetensors in Diffusers import argparse import os import torch from diffusers import StableDiffusionPipeline from library import model_util as model_util def convert(args): # 引数を確認する load_dtype = torch.float16 if args.fp16 else None save_dtype = None if args.fp16: save_dtype = torch.float16 elif args.bf16: save_dtype = torch.bfloat16 elif args.float: save_dtype = torch.float is_load_ckpt = os.path.isfile(args.model_to_load) is_save_ckpt = len(os.path.splitext(args.model_to_save)[1]) > 0 assert not is_load_ckpt or args.v1 != args.v2, f"v1 or v2 is required to load checkpoint / checkpointの読み込みにはv1/v2指定が必要です" assert is_save_ckpt or args.reference_model is not None, f"reference model is required to save as Diffusers / Diffusers形式での保存には参照モデルが必要です" # モデルを読み込む msg = "checkpoint" if is_load_ckpt else ("Diffusers" + (" as fp16" if args.fp16 else "")) print(f"loading {msg}: {args.model_to_load}") if is_load_ckpt: v2_model = args.v2 text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(v2_model, args.model_to_load) else: pipe = StableDiffusionPipeline.from_pretrained(args.model_to_load, torch_dtype=load_dtype, tokenizer=None, safety_checker=None) text_encoder = pipe.text_encoder vae = pipe.vae unet = pipe.unet if args.v1 == args.v2: # 自動判定する v2_model = unet.config.cross_attention_dim == 1024 print("checking model version: model is " + ('v2' if v2_model else 'v1')) else: v2_model = args.v1 # 変換して保存する msg = ("checkpoint" + ("" if save_dtype is None else f" in {save_dtype}")) if is_save_ckpt else "Diffusers" print(f"converting and saving as {msg}: {args.model_to_save}") if is_save_ckpt: original_model = args.model_to_load if is_load_ckpt else None key_count = model_util.save_stable_diffusion_checkpoint(v2_model, args.model_to_save, text_encoder, unet, original_model, args.epoch, args.global_step, save_dtype, vae) print(f"model saved. 
total converted state_dict keys: {key_count}") else: print(f"copy scheduler/tokenizer config from: {args.reference_model}") model_util.save_diffusers_checkpoint(v2_model, args.model_to_save, text_encoder, unet, args.reference_model, vae, args.use_safetensors) print(f"model saved.") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--v1", action='store_true', help='load v1.x model (v1 or v2 is required to load checkpoint) / 1.xのモデルを読み込む') parser.add_argument("--v2", action='store_true', help='load v2.0 model (v1 or v2 is required to load checkpoint) / 2.0のモデルを読み込む') parser.add_argument("--fp16", action='store_true', help='load as fp16 (Diffusers only) and save as fp16 (checkpoint only) / fp16形式で読み込み(Diffusers形式のみ対応)、保存する(checkpointのみ対応)') parser.add_argument("--bf16", action='store_true', help='save as bf16 (checkpoint only) / bf16形式で保存する(checkpointのみ対応)') parser.add_argument("--float", action='store_true', help='save as float (checkpoint only) / float(float32)形式で保存する(checkpointのみ対応)') parser.add_argument("--epoch", type=int, default=0, help='epoch to write to checkpoint / checkpointに記録するepoch数の値') parser.add_argument("--global_step", type=int, default=0, help='global_step to write to checkpoint / checkpointに記録するglobal_stepの値') parser.add_argument("--reference_model", type=str, default=None, help="reference model for schduler/tokenizer, required in saving Diffusers, copy schduler/tokenizer from this / scheduler/tokenizerのコピー元のDiffusersモデル、Diffusers形式で保存するときに必要") parser.add_argument("--use_safetensors", action='store_true', help="use safetensors format to save Diffusers model (checkpoint depends on the file extension) / Duffusersモデルをsafetensors形式で保存する(checkpointは拡張子で自動判定)") parser.add_argument("model_to_load", type=str, default=None, help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ") parser.add_argument("model_to_save", type=str, default=None, help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存") args = parser.parse_args() convert(args) tools/caption.py METASEP # This script will create the caption text files in the specified folder using the specified file pattern and caption text. 
# # eg: python caption.py D:\some\folder\location "*.png, *.jpg, *.webp" "some caption text" import argparse # import glob # import os from pathlib import Path def create_caption_files(image_folder: str, file_pattern: str, caption_text: str, caption_file_ext: str, overwrite: bool): # Split the file patterns string and strip whitespace from each pattern patterns = [pattern.strip() for pattern in file_pattern.split(",")] # Create a Path object for the image folder folder = Path(image_folder) # Iterate over the file patterns for pattern in patterns: # Use the glob method to match the file patterns files = folder.glob(pattern) # Iterate over the matched files for file in files: # Check if a text file with the same name as the current file exists in the folder txt_file = file.with_suffix(caption_file_ext) if not txt_file.exists() or overwrite: # Create a text file with the caption text in the folder, if it does not already exist # or if the overwrite argument is True with open(txt_file, "w") as f: f.write(caption_text) def main(): # Define command-line arguments parser = argparse.ArgumentParser() parser.add_argument("image_folder", type=str, help="the folder where the image files are located") parser.add_argument("--file_pattern", type=str, default="*.png, *.jpg, *.jpeg, *.webp", help="the pattern to match the image file names") parser.add_argument("--caption_file_ext", type=str, default=".caption", help="the caption file extension.") parser.add_argument("--overwrite", action="store_true", default=False, help="whether to overwrite existing caption files") # Create a mutually exclusive group for the caption_text and caption_file arguments group = parser.add_mutually_exclusive_group() group.add_argument("--caption_text", type=str, help="the text to include in the caption files") group.add_argument("--caption_file", type=argparse.FileType("r"), help="the file containing the text to include in the caption files") # Parse the command-line arguments args = parser.parse_args() image_folder = args.image_folder file_pattern = args.file_pattern caption_file_ext = args.caption_file_ext overwrite = args.overwrite # Get the caption text from either the caption_text or caption_file argument if args.caption_text: caption_text = args.caption_text elif args.caption_file: caption_text = args.caption_file.read() # Create a Path object for the image folder folder = Path(image_folder) # Check if the image folder exists and is a directory if not folder.is_dir(): raise ValueError(f"{image_folder} is not a valid directory.") # Create the caption files create_caption_files(image_folder, file_pattern, caption_text, caption_file_ext, overwrite) if __name__ == "__main__": main() library/wd14_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess from .common_gui import get_folder_path def caption_images(train_data_dir, caption_extension, batch_size, thresh): # Check for caption_text_input # if caption_text_input == "": # msgbox("Caption text is missing...") # return # Check for images_dir_input if train_data_dir == '': msgbox('Image folder is missing...') return print(f'Captioning files in {train_data_dir}...') run_cmd = f'accelerate launch "./script/tag_images_by_wd14_tagger.py"' run_cmd += f' --batch_size="{int(batch_size)}"' run_cmd += f' --thresh="{thresh}"' if caption_extension != '': run_cmd += f' --caption_extension="{caption_extension}"' run_cmd += f' "{train_data_dir}"' print(run_cmd) # Run the command subprocess.run(run_cmd) print('...captioning done') ### # Gradio UI ### def 
gradio_wd14_caption_gui_tab(): with gr.Tab('WD14 Captioning'): gr.Markdown( 'This utility will use WD14 to caption files for each images in a folder.' ) with gr.Row(): train_data_dir = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_train_data_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_train_data_dir_input.click( get_folder_path, outputs=train_data_dir ) caption_extension = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) thresh = gr.Number(value=0.35, label='Threshold') batch_size = gr.Number( value=1, label='Batch size', interactive=True ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[train_data_dir, caption_extension, batch_size, thresh], ) library/model_util.py METASEP # v1: split from train_db_fixed.py. # v2: support safetensors import math import os import torch from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from safetensors.torch import load_file, save_file # DiffUsers版StableDiffusionのモデルパラメータ NUM_TRAIN_TIMESTEPS = 1000 BETA_START = 0.00085 BETA_END = 0.0120 UNET_PARAMS_MODEL_CHANNELS = 320 UNET_PARAMS_CHANNEL_MULT = [1, 2, 4, 4] UNET_PARAMS_ATTENTION_RESOLUTIONS = [4, 2, 1] UNET_PARAMS_IMAGE_SIZE = 32 # unused UNET_PARAMS_IN_CHANNELS = 4 UNET_PARAMS_OUT_CHANNELS = 4 UNET_PARAMS_NUM_RES_BLOCKS = 2 UNET_PARAMS_CONTEXT_DIM = 768 UNET_PARAMS_NUM_HEADS = 8 VAE_PARAMS_Z_CHANNELS = 4 VAE_PARAMS_RESOLUTION = 256 VAE_PARAMS_IN_CHANNELS = 3 VAE_PARAMS_OUT_CH = 3 VAE_PARAMS_CH = 128 VAE_PARAMS_CH_MULT = [1, 2, 4, 4] VAE_PARAMS_NUM_RES_BLOCKS = 2 # V2 V2_UNET_PARAMS_ATTENTION_HEAD_DIM = [5, 10, 20, 20] V2_UNET_PARAMS_CONTEXT_DIM = 1024 # Diffusersの設定を読み込むための参照モデル DIFFUSERS_REF_MODEL_ID_V1 = 'runwayml/stable-diffusion-v1-5' DIFFUSERS_REF_MODEL_ID_V2 = 'stabilityai/stable-diffusion-2-1' # region StableDiffusion->Diffusersの変換コード # convert_original_stable_diffusion_to_diffusers をコピーして修正している(ASL 2.0) def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return '.'.join(path.split('.')[n_shave_prefix_segments:]) else: return '.'.join(path.split('.')[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace('in_layers.0', 'norm1') new_item = new_item.replace('in_layers.2', 'conv1') new_item = new_item.replace('out_layers.0', 'norm2') new_item = new_item.replace('out_layers.3', 'conv2') new_item = new_item.replace('emb_layers.1', 'time_emb_proj') new_item = new_item.replace('skip_connection', 'conv_shortcut') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('nin_shortcut', 'conv_shortcut') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({'old': old_item, 'new': new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace('norm.weight', 'group_norm.weight') new_item = new_item.replace('norm.bias', 'group_norm.bias') new_item = new_item.replace('q.weight', 'query.weight') new_item = new_item.replace('q.bias', 'query.bias') new_item = new_item.replace('k.weight', 'key.weight') new_item = new_item.replace('k.bias', 'key.bias') new_item = new_item.replace('v.weight', 'value.weight') new_item = new_item.replace('v.bias', 'value.bias') new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') new_item = shave_segments( new_item, n_shave_prefix_segments=n_shave_prefix_segments ) mapping.append({'old': old_item, 'new': new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None, ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance( paths, list ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = ( (-1, channels) if len(old_tensor.shape) == 3 else (-1) ) num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3 old_tensor = old_tensor.reshape( (num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map['query']] = query.reshape(target_shape) checkpoint[path_map['key']] = key.reshape(target_shape) checkpoint[path_map['value']] = value.reshape(target_shape) for path in paths: new_path = path['new'] # These have already been assigned if ( attention_paths_to_split is not None and new_path in attention_paths_to_split ): continue # Global renaming happens here new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0') new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0') new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1') if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace( replacement['old'], replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if 'proj_attn.weight' in new_path: checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path['old']] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ['query.weight', 'key.weight', 'value.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif 'proj_attn.weight' in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def linear_transformer_to_conv(checkpoint): keys = list(checkpoint.keys()) tf_keys = ['proj_in.weight', 'proj_out.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in tf_keys: if checkpoint[key].ndim == 2: checkpoint[key] = checkpoint[key].unsqueeze(2).unsqueeze(2) def convert_ldm_unet_checkpoint(v2, checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} unet_key = 'model.diffusion_model.' keys = list(checkpoint.keys()) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, '')] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint['time_embedding.linear_1.weight'] = unet_state_dict[ 'time_embed.0.weight' ] new_checkpoint['time_embedding.linear_1.bias'] = unet_state_dict[ 'time_embed.0.bias' ] new_checkpoint['time_embedding.linear_2.weight'] = unet_state_dict[ 'time_embed.2.weight' ] new_checkpoint['time_embedding.linear_2.bias'] = unet_state_dict[ 'time_embed.2.bias' ] new_checkpoint['conv_in.weight'] = unet_state_dict[ 'input_blocks.0.0.weight' ] new_checkpoint['conv_in.bias'] = unet_state_dict['input_blocks.0.0.bias'] new_checkpoint['conv_norm_out.weight'] = unet_state_dict['out.0.weight'] new_checkpoint['conv_norm_out.bias'] = unet_state_dict['out.0.bias'] new_checkpoint['conv_out.weight'] = unet_state_dict['out.2.weight'] new_checkpoint['conv_out.bias'] = unet_state_dict['out.2.bias'] # Retrieves the keys for the input blocks only num_input_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'input_blocks' in layer } ) input_blocks = { layer_id: [ key for key in unet_state_dict if f'input_blocks.{layer_id}.' 
in key ] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'middle_block' in layer } ) middle_blocks = { layer_id: [ key for key in unet_state_dict if f'middle_block.{layer_id}.' in key ] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len( { '.'.join(layer.split('.')[:2]) for layer in unet_state_dict if 'output_blocks' in layer } ) output_blocks = { layer_id: [ key for key in unet_state_dict if f'output_blocks.{layer_id}.' in key ] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config['layers_per_block'] + 1) layer_in_block_id = (i - 1) % (config['layers_per_block'] + 1) resnets = [ key for key in input_blocks[i] if f'input_blocks.{i}.0' in key and f'input_blocks.{i}.0.op' not in key ] attentions = [ key for key in input_blocks[i] if f'input_blocks.{i}.1' in key ] if f'input_blocks.{i}.0.op.weight' in unet_state_dict: new_checkpoint[ f'down_blocks.{block_id}.downsamplers.0.conv.weight' ] = unet_state_dict.pop(f'input_blocks.{i}.0.op.weight') new_checkpoint[ f'down_blocks.{block_id}.downsamplers.0.conv.bias' ] = unet_state_dict.pop(f'input_blocks.{i}.0.op.bias') paths = renew_resnet_paths(resnets) meta_path = { 'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config ) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config ) attentions_paths = renew_attention_paths(attentions) meta_path = {'old': 'middle_block.1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) for i in range(num_output_blocks): block_id = i // (config['layers_per_block'] + 1) layer_in_block_id = i % (config['layers_per_block'] + 1) output_block_layers = [ shave_segments(name, 2) for name in output_blocks[i] ] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split('.')[0], shave_segments( layer, 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [ key for key in output_blocks[i] if f'output_blocks.{i}.0' in key ] attentions = [ key for key in output_blocks[i] if f'output_blocks.{i}.1' in key ] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = { 'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) # オリジナル: # if ["conv.weight", "conv.bias"] in output_block_list.values(): # index = 
list(output_block_list.values()).index(["conv.weight", "conv.bias"]) # biasとweightの順番に依存しないようにする:もっといいやり方がありそうだが for l in output_block_list.values(): l.sort() if ['conv.bias', 'conv.weight'] in output_block_list.values(): index = list(output_block_list.values()).index( ['conv.bias', 'conv.weight'] ) new_checkpoint[ f'up_blocks.{block_id}.upsamplers.0.conv.bias' ] = unet_state_dict[f'output_blocks.{i}.{index}.conv.bias'] new_checkpoint[ f'up_blocks.{block_id}.upsamplers.0.conv.weight' ] = unet_state_dict[f'output_blocks.{i}.{index}.conv.weight'] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config, ) else: resnet_0_paths = renew_resnet_paths( output_block_layers, n_shave_prefix_segments=1 ) for path in resnet_0_paths: old_path = '.'.join(['output_blocks', str(i), path['old']]) new_path = '.'.join( [ 'up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new'], ] ) new_checkpoint[new_path] = unet_state_dict[old_path] # SDのv2では1*1のconv2dがlinearに変わっているので、linear->convに変換する if v2: linear_transformer_to_conv(new_checkpoint) return new_checkpoint def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} vae_key = 'first_stage_model.' keys = list(checkpoint.keys()) for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, '')] = checkpoint.get(key) # if len(vae_state_dict) == 0: # # 渡されたcheckpointは.ckptから読み込んだcheckpointではなくvaeのstate_dict # vae_state_dict = checkpoint new_checkpoint = {} new_checkpoint['encoder.conv_in.weight'] = vae_state_dict[ 'encoder.conv_in.weight' ] new_checkpoint['encoder.conv_in.bias'] = vae_state_dict[ 'encoder.conv_in.bias' ] new_checkpoint['encoder.conv_out.weight'] = vae_state_dict[ 'encoder.conv_out.weight' ] new_checkpoint['encoder.conv_out.bias'] = vae_state_dict[ 'encoder.conv_out.bias' ] new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict[ 'encoder.norm_out.weight' ] new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict[ 'encoder.norm_out.bias' ] new_checkpoint['decoder.conv_in.weight'] = vae_state_dict[ 'decoder.conv_in.weight' ] new_checkpoint['decoder.conv_in.bias'] = vae_state_dict[ 'decoder.conv_in.bias' ] new_checkpoint['decoder.conv_out.weight'] = vae_state_dict[ 'decoder.conv_out.weight' ] new_checkpoint['decoder.conv_out.bias'] = vae_state_dict[ 'decoder.conv_out.bias' ] new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict[ 'decoder.norm_out.weight' ] new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict[ 'decoder.norm_out.bias' ] new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight'] new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias'] new_checkpoint['post_quant_conv.weight'] = vae_state_dict[ 'post_quant_conv.weight' ] new_checkpoint['post_quant_conv.bias'] = vae_state_dict[ 'post_quant_conv.bias' ] # Retrieves the keys for the encoder down blocks only num_down_blocks = len( { '.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer } ) down_blocks = { layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len( { 
'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer } ) up_blocks = { layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [ key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key ] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: new_checkpoint[ f'encoder.down_blocks.{i}.downsamplers.0.conv.weight' ] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.weight') new_checkpoint[ f'encoder.down_blocks.{i}.downsamplers.0.conv.bias' ] = vae_state_dict.pop(f'encoder.down.{i}.downsample.conv.bias') paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'down.{i}.block', 'new': f'down_blocks.{i}.resnets', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [ key for key in mid_resnets if f'encoder.mid.block_{i}' in key ] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_attentions = [ key for key in vae_state_dict if 'encoder.mid.attn' in key ] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key ] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: new_checkpoint[ f'decoder.up_blocks.{i}.upsamplers.0.conv.weight' ] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.weight'] new_checkpoint[ f'decoder.up_blocks.{i}.upsamplers.0.conv.bias' ] = vae_state_dict[f'decoder.up.{block_id}.upsample.conv.bias'] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'up.{block_id}.block', 'new': f'up_blocks.{i}.resnets', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [ key for key in mid_resnets if f'decoder.mid.block_{i}' in key ] paths = renew_vae_resnet_paths(resnets) meta_path = { 'old': f'mid.block_{i}', 'new': f'mid_block.resnets.{i - 1}', } assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) mid_attentions = [ key for key in vae_state_dict if 'decoder.mid.attn' in key ] paths = renew_vae_attention_paths(mid_attentions) meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint( paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config, ) conv_attn_to_linear(new_checkpoint) return new_checkpoint def create_unet_diffusers_config(v2): """ Creates a config for the diffusers based on the config of the LDM model. 
""" # unet_params = original_config.model.params.unet_config.params block_out_channels = [ UNET_PARAMS_MODEL_CHANNELS * mult for mult in UNET_PARAMS_CHANNEL_MULT ] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = ( 'CrossAttnDownBlock2D' if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else 'DownBlock2D' ) down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = ( 'CrossAttnUpBlock2D' if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else 'UpBlock2D' ) up_block_types.append(block_type) resolution //= 2 config = dict( sample_size=UNET_PARAMS_IMAGE_SIZE, in_channels=UNET_PARAMS_IN_CHANNELS, out_channels=UNET_PARAMS_OUT_CHANNELS, down_block_types=tuple(down_block_types), up_block_types=tuple(up_block_types), block_out_channels=tuple(block_out_channels), layers_per_block=UNET_PARAMS_NUM_RES_BLOCKS, cross_attention_dim=UNET_PARAMS_CONTEXT_DIM if not v2 else V2_UNET_PARAMS_CONTEXT_DIM, attention_head_dim=UNET_PARAMS_NUM_HEADS if not v2 else V2_UNET_PARAMS_ATTENTION_HEAD_DIM, ) return config def create_vae_diffusers_config(): """ Creates a config for the diffusers based on the config of the LDM model. """ # vae_params = original_config.model.params.first_stage_config.params.ddconfig # _ = original_config.model.params.first_stage_config.params.embed_dim block_out_channels = [VAE_PARAMS_CH * mult for mult in VAE_PARAMS_CH_MULT] down_block_types = ['DownEncoderBlock2D'] * len(block_out_channels) up_block_types = ['UpDecoderBlock2D'] * len(block_out_channels) config = dict( sample_size=VAE_PARAMS_RESOLUTION, in_channels=VAE_PARAMS_IN_CHANNELS, out_channels=VAE_PARAMS_OUT_CH, down_block_types=tuple(down_block_types), up_block_types=tuple(up_block_types), block_out_channels=tuple(block_out_channels), latent_channels=VAE_PARAMS_Z_CHANNELS, layers_per_block=VAE_PARAMS_NUM_RES_BLOCKS, ) return config def convert_ldm_clip_checkpoint_v1(checkpoint): keys = list(checkpoint.keys()) text_model_dict = {} for key in keys: if key.startswith('cond_stage_model.transformer'): text_model_dict[ key[len('cond_stage_model.transformer.') :] ] = checkpoint[key] return text_model_dict def convert_ldm_clip_checkpoint_v2(checkpoint, max_length): # 嫌になるくらい違うぞ! def convert_key(key): if not key.startswith('cond_stage_model'): return None # common conversion key = key.replace( 'cond_stage_model.model.transformer.', 'text_model.encoder.' ) key = key.replace('cond_stage_model.model.', 'text_model.') if 'resblocks' in key: # resblocks conversion key = key.replace('.resblocks.', '.layers.') if '.ln_' in key: key = key.replace('.ln_', '.layer_norm') elif '.mlp.' in key: key = key.replace('.c_fc.', '.fc1.') key = key.replace('.c_proj.', '.fc2.') elif '.attn.out_proj' in key: key = key.replace('.attn.out_proj.', '.self_attn.out_proj.') elif '.attn.in_proj' in key: key = None # 特殊なので後で処理する else: raise ValueError(f'unexpected key in SD: {key}') elif '.positional_embedding' in key: key = key.replace( '.positional_embedding', '.embeddings.position_embedding.weight', ) elif '.text_projection' in key: key = None # 使われない??? elif '.logit_scale' in key: key = None # 使われない??? elif '.token_embedding' in key: key = key.replace( '.token_embedding.weight', '.embeddings.token_embedding.weight' ) elif '.ln_final' in key: key = key.replace('.ln_final', '.final_layer_norm') return key keys = list(checkpoint.keys()) new_sd = {} for key in keys: # remove resblocks 23 if '.resblocks.23.' 
in key: continue new_key = convert_key(key) if new_key is None: continue new_sd[new_key] = checkpoint[key] # attnの変換 for key in keys: if '.resblocks.23.' in key: continue if '.resblocks' in key and '.attn.in_proj_' in key: # 三つに分割 values = torch.chunk(checkpoint[key], 3) key_suffix = '.weight' if 'weight' in key else '.bias' key_pfx = key.replace( 'cond_stage_model.model.transformer.resblocks.', 'text_model.encoder.layers.', ) key_pfx = key_pfx.replace('_weight', '') key_pfx = key_pfx.replace('_bias', '') key_pfx = key_pfx.replace('.attn.in_proj', '.self_attn.') new_sd[key_pfx + 'q_proj' + key_suffix] = values[0] new_sd[key_pfx + 'k_proj' + key_suffix] = values[1] new_sd[key_pfx + 'v_proj' + key_suffix] = values[2] # position_idsの追加 new_sd['text_model.embeddings.position_ids'] = torch.Tensor( [list(range(max_length))] ).to(torch.int64) return new_sd # endregion # region Diffusers->StableDiffusion の変換コード # convert_diffusers_to_original_stable_diffusion をコピーして修正している(ASL 2.0) def conv_transformer_to_linear(checkpoint): keys = list(checkpoint.keys()) tf_keys = ['proj_in.weight', 'proj_out.weight'] for key in keys: if '.'.join(key.split('.')[-2:]) in tf_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] def convert_unet_state_dict_to_sd(v2, unet_state_dict): unet_conversion_map = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] unet_conversion_map_resnet = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] unet_conversion_map_layer = [] for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks hf_down_res_prefix = f'down_blocks.{i}.resnets.{j}.' sd_down_res_prefix = f'input_blocks.{3*i + j + 1}.0.' unet_conversion_map_layer.append( (sd_down_res_prefix, hf_down_res_prefix) ) if i < 3: # no attention layers in down_blocks.3 hf_down_atn_prefix = f'down_blocks.{i}.attentions.{j}.' sd_down_atn_prefix = f'input_blocks.{3*i + j + 1}.1.' unet_conversion_map_layer.append( (sd_down_atn_prefix, hf_down_atn_prefix) ) for j in range(3): # loop over resnets/attentions for upblocks hf_up_res_prefix = f'up_blocks.{i}.resnets.{j}.' sd_up_res_prefix = f'output_blocks.{3*i + j}.0.' unet_conversion_map_layer.append( (sd_up_res_prefix, hf_up_res_prefix) ) if i > 0: # no attention layers in up_blocks.0 hf_up_atn_prefix = f'up_blocks.{i}.attentions.{j}.' sd_up_atn_prefix = f'output_blocks.{3*i + j}.1.' unet_conversion_map_layer.append( (sd_up_atn_prefix, hf_up_atn_prefix) ) if i < 3: # no downsample in down_blocks.3 hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.conv.' sd_downsample_prefix = f'input_blocks.{3*(i+1)}.0.op.' unet_conversion_map_layer.append( (sd_downsample_prefix, hf_downsample_prefix) ) # no upsample in up_blocks.3 hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.' sd_upsample_prefix = ( f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.' 
) unet_conversion_map_layer.append( (sd_upsample_prefix, hf_upsample_prefix) ) hf_mid_atn_prefix = 'mid_block.attentions.0.' sd_mid_atn_prefix = 'middle_block.1.' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): hf_mid_res_prefix = f'mid_block.resnets.{j}.' sd_mid_res_prefix = f'middle_block.{2*j}.' unet_conversion_map_layer.append( (sd_mid_res_prefix, hf_mid_res_prefix) ) # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. mapping = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: mapping[hf_name] = sd_name for k, v in mapping.items(): if 'resnets' in k: for sd_part, hf_part in unet_conversion_map_resnet: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} if v2: conv_transformer_to_linear(new_state_dict) return new_state_dict # ================# # VAE Conversion # # ================# def reshape_weight_for_sd(w): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape, 1, 1) def convert_vae_state_dict(vae_state_dict): vae_conversion_map = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): hf_down_prefix = f'encoder.down_blocks.{i}.resnets.{j}.' sd_down_prefix = f'encoder.down.{i}.block.{j}.' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.' sd_downsample_prefix = f'down.{i}.downsample.' vae_conversion_map.append( (sd_downsample_prefix, hf_downsample_prefix) ) hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.' sd_upsample_prefix = f'up.{3-i}.upsample.' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): hf_up_prefix = f'decoder.up_blocks.{i}.resnets.{j}.' sd_up_prefix = f'decoder.up.{3-i}.block.{j}.' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): hf_mid_res_prefix = f'mid_block.resnets.{i}.' sd_mid_res_prefix = f'mid.block_{i+1}.' 
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) vae_conversion_map_attn = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] mapping = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): if 'attentions' in k: for sd_part, hf_part in vae_conversion_map_attn: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} weights_to_convert = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f'mid.attn_1.{weight_name}.weight' in k: # print(f"Reshaping {k} for SD format") new_state_dict[k] = reshape_weight_for_sd(v) return new_state_dict # endregion # region 自作のモデル読み書きなど def is_safetensors(path): return os.path.splitext(path)[1].lower() == '.safetensors' def load_checkpoint_with_text_encoder_conversion(ckpt_path): # text encoderの格納形式が違うモデルに対応する ('text_model'がない) TEXT_ENCODER_KEY_REPLACEMENTS = [ ( 'cond_stage_model.transformer.embeddings.', 'cond_stage_model.transformer.text_model.embeddings.', ), ( 'cond_stage_model.transformer.encoder.', 'cond_stage_model.transformer.text_model.encoder.', ), ( 'cond_stage_model.transformer.final_layer_norm.', 'cond_stage_model.transformer.text_model.final_layer_norm.', ), ] if is_safetensors(ckpt_path): checkpoint = None state_dict = load_file(ckpt_path, 'cpu') else: checkpoint = torch.load(ckpt_path, map_location='cpu') if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint checkpoint = None key_reps = [] for rep_from, rep_to in TEXT_ENCODER_KEY_REPLACEMENTS: for key in state_dict.keys(): if key.startswith(rep_from): new_key = rep_to + key[len(rep_from) :] key_reps.append((key, new_key)) for key, new_key in key_reps: state_dict[new_key] = state_dict[key] del state_dict[key] return checkpoint, state_dict # TODO dtype指定の動作が怪しいので確認する text_encoderを指定形式で作れるか未確認 def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, dtype=None): _, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path) if dtype is not None: for k, v in state_dict.items(): if type(v) is torch.Tensor: state_dict[k] = v.to(dtype) # Convert the UNet2DConditionModel model. unet_config = create_unet_diffusers_config(v2) converted_unet_checkpoint = convert_ldm_unet_checkpoint( v2, state_dict, unet_config ) unet = UNet2DConditionModel(**unet_config) info = unet.load_state_dict(converted_unet_checkpoint) print('loading u-net:', info) # Convert the VAE model. 
    vae_config = create_vae_diffusers_config()
    converted_vae_checkpoint = convert_ldm_vae_checkpoint(
        state_dict, vae_config
    )

    vae = AutoencoderKL(**vae_config)
    info = vae.load_state_dict(converted_vae_checkpoint)
    print('loading vae:', info)

    # convert text_model
    if v2:
        converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2(
            state_dict, 77
        )
        cfg = CLIPTextConfig(
            vocab_size=49408,
            hidden_size=1024,
            intermediate_size=4096,
            num_hidden_layers=23,
            num_attention_heads=16,
            max_position_embeddings=77,
            hidden_act='gelu',
            layer_norm_eps=1e-05,
            dropout=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=1.0,
            pad_token_id=1,
            bos_token_id=0,
            eos_token_id=2,
            model_type='clip_text_model',
            projection_dim=512,
            torch_dtype='float32',
            transformers_version='4.25.0.dev0',
        )
        text_model = CLIPTextModel._from_config(cfg)
        info = text_model.load_state_dict(converted_text_encoder_checkpoint)
    else:
        converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(
            state_dict
        )
        text_model = CLIPTextModel.from_pretrained(
            'openai/clip-vit-large-patch14'
        )
        info = text_model.load_state_dict(converted_text_encoder_checkpoint)
    print('loading text encoder:', info)

    return text_model, vae, unet


def convert_text_encoder_state_dict_to_sd_v2(
    checkpoint, make_dummy_weights=False
):
    def convert_key(key):
        # remove position_ids
        if '.position_ids' in key:
            return None

        # common conversion
        key = key.replace('text_model.encoder.', 'transformer.')
        key = key.replace('text_model.', '')
        if 'layers' in key:
            # resblocks conversion
            key = key.replace('.layers.', '.resblocks.')
            if '.layer_norm' in key:
                key = key.replace('.layer_norm', '.ln_')
            elif '.mlp.' in key:
                key = key.replace('.fc1.', '.c_fc.')
                key = key.replace('.fc2.', '.c_proj.')
            elif '.self_attn.out_proj' in key:
                key = key.replace('.self_attn.out_proj.', '.attn.out_proj.')
            elif '.self_attn.' in key:
                key = None   # special case: q/k/v projections are merged later
            else:
                raise ValueError(f'unexpected key in Diffusers model: {key}')
        elif '.position_embedding' in key:
            key = key.replace(
                'embeddings.position_embedding.weight', 'positional_embedding'
            )
        elif '.token_embedding' in key:
            key = key.replace(
                'embeddings.token_embedding.weight', 'token_embedding.weight'
            )
        elif 'final_layer_norm' in key:
            key = key.replace('final_layer_norm', 'ln_final')
        return key

    keys = list(checkpoint.keys())
    new_sd = {}
    for key in keys:
        new_key = convert_key(key)
        if new_key is None:
            continue
        new_sd[new_key] = checkpoint[key]

    # convert attention weights
    for key in keys:
        if 'layers' in key and 'q_proj' in key:
            # concatenate the three projections into a single in_proj
            key_q = key
            key_k = key.replace('q_proj', 'k_proj')
            key_v = key.replace('q_proj', 'v_proj')

            value_q = checkpoint[key_q]
            value_k = checkpoint[key_k]
            value_v = checkpoint[key_v]
            value = torch.cat([value_q, value_k, value_v])

            new_key = key.replace(
                'text_model.encoder.layers.', 'transformer.resblocks.'
            )
            new_key = new_key.replace('.self_attn.q_proj.', '.attn.in_proj_')
            new_sd[new_key] = value

    # optionally fabricate the weights Diffusers does not carry (final resblock etc.)
    if make_dummy_weights:
        print(
            'make dummy weights for resblock.23, text_projection and logit scale.'
) keys = list(new_sd.keys()) for key in keys: if key.startswith('transformer.resblocks.22.'): new_sd[key.replace('.22.', '.23.')] = new_sd[ key ].clone() # copyしないとsafetensorsの保存で落ちる # Diffusersに含まれない重みを作っておく new_sd['text_projection'] = torch.ones( (1024, 1024), dtype=new_sd[keys[0]].dtype, device=new_sd[keys[0]].device, ) new_sd['logit_scale'] = torch.tensor(1) return new_sd def save_stable_diffusion_checkpoint( v2, output_file, text_encoder, unet, ckpt_path, epochs, steps, save_dtype=None, vae=None, ): if ckpt_path is not None: # epoch/stepを参照する。またVAEがメモリ上にないときなど、もう一度VAEを含めて読み込む checkpoint, state_dict = load_checkpoint_with_text_encoder_conversion( ckpt_path ) if checkpoint is None: # safetensors または state_dictのckpt checkpoint = {} strict = False else: strict = True if 'state_dict' in state_dict: del state_dict['state_dict'] else: # 新しく作る assert ( vae is not None ), 'VAE is required to save a checkpoint without a given checkpoint' checkpoint = {} state_dict = {} strict = False def update_sd(prefix, sd): for k, v in sd.items(): key = prefix + k assert ( not strict or key in state_dict ), f'Illegal key in save SD: {key}' if save_dtype is not None: v = v.detach().clone().to('cpu').to(save_dtype) state_dict[key] = v # Convert the UNet model unet_state_dict = convert_unet_state_dict_to_sd(v2, unet.state_dict()) update_sd('model.diffusion_model.', unet_state_dict) # Convert the text encoder model if v2: make_dummy = ( ckpt_path is None ) # 参照元のcheckpointがない場合は最後の層を前の層から複製して作るなどダミーの重みを入れる text_enc_dict = convert_text_encoder_state_dict_to_sd_v2( text_encoder.state_dict(), make_dummy ) update_sd('cond_stage_model.model.', text_enc_dict) else: text_enc_dict = text_encoder.state_dict() update_sd('cond_stage_model.transformer.', text_enc_dict) # Convert the VAE if vae is not None: vae_dict = convert_vae_state_dict(vae.state_dict()) update_sd('first_stage_model.', vae_dict) # Put together new checkpoint key_count = len(state_dict.keys()) new_ckpt = {'state_dict': state_dict} if 'epoch' in checkpoint: epochs += checkpoint['epoch'] if 'global_step' in checkpoint: steps += checkpoint['global_step'] new_ckpt['epoch'] = epochs new_ckpt['global_step'] = steps if is_safetensors(output_file): # TODO Tensor以外のdictの値を削除したほうがいいか save_file(state_dict, output_file) else: torch.save(new_ckpt, output_file) return key_count def save_diffusers_checkpoint( v2, output_dir, text_encoder, unet, pretrained_model_name_or_path, vae=None, use_safetensors=False, ): if pretrained_model_name_or_path is None: # load default settings for v1/v2 if v2: pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V2 else: pretrained_model_name_or_path = DIFFUSERS_REF_MODEL_ID_V1 scheduler = DDIMScheduler.from_pretrained( pretrained_model_name_or_path, subfolder='scheduler' ) tokenizer = CLIPTokenizer.from_pretrained( pretrained_model_name_or_path, subfolder='tokenizer' ) if vae is None: vae = AutoencoderKL.from_pretrained( pretrained_model_name_or_path, subfolder='vae' ) pipeline = StableDiffusionPipeline( unet=unet, text_encoder=text_encoder, vae=vae, scheduler=scheduler, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, requires_safety_checker=None, ) pipeline.save_pretrained(output_dir, safe_serialization=use_safetensors) VAE_PREFIX = 'first_stage_model.' 
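# A minimal, hypothetical usage sketch for the helper below (comments only; the VAE id
# used here is just an example and is not referenced elsewhere in this repository):
#
#   import torch
#   vae = load_vae('stabilityai/sd-vae-ft-mse', dtype=torch.float16)  # Diffusers repo id/dir, or a local .ckpt/.bin file
#   # The returned AutoencoderKL can then be passed as `vae=` to
#   # save_stable_diffusion_checkpoint(...) or save_diffusers_checkpoint(...) above.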
def load_vae(vae_id, dtype): print(f'load VAE: {vae_id}') if os.path.isdir(vae_id) or not os.path.isfile(vae_id): # Diffusers local/remote try: vae = AutoencoderKL.from_pretrained( vae_id, subfolder=None, torch_dtype=dtype ) except EnvironmentError as e: print(f'exception occurs in loading vae: {e}') print("retry with subfolder='vae'") vae = AutoencoderKL.from_pretrained( vae_id, subfolder='vae', torch_dtype=dtype ) return vae # local vae_config = create_vae_diffusers_config() if vae_id.endswith('.bin'): # SD 1.5 VAE on Huggingface vae_sd = torch.load(vae_id, map_location='cpu') converted_vae_checkpoint = vae_sd else: # StableDiffusion vae_model = torch.load(vae_id, map_location='cpu') vae_sd = vae_model['state_dict'] # vae only or full model full_model = False for vae_key in vae_sd: if vae_key.startswith(VAE_PREFIX): full_model = True break if not full_model: sd = {} for key, value in vae_sd.items(): sd[VAE_PREFIX + key] = value vae_sd = sd del sd # Convert the VAE model. converted_vae_checkpoint = convert_ldm_vae_checkpoint( vae_sd, vae_config ) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) return vae def get_epoch_ckpt_name(use_safetensors, epoch): return f'epoch-{epoch:06d}' + ( '.safetensors' if use_safetensors else '.ckpt' ) def get_last_ckpt_name(use_safetensors): return f'last' + ('.safetensors' if use_safetensors else '.ckpt') # endregion def make_bucket_resolutions( max_reso, min_size=256, max_size=1024, divisible=64 ): max_width, max_height = max_reso max_area = (max_width // divisible) * (max_height // divisible) resos = set() size = int(math.sqrt(max_area)) * divisible resos.add((size, size)) size = min_size while size <= max_size: width = size height = min(max_size, (max_area // (width // divisible)) * divisible) resos.add((width, height)) resos.add((height, width)) # # make additional resos # if width >= height and width - divisible >= min_size: # resos.add((width - divisible, height)) # resos.add((height, width - divisible)) # if height >= width and height - divisible >= min_size: # resos.add((width, height - divisible)) # resos.add((height - divisible, width)) size += divisible resos = list(resos) resos.sort() aspect_ratios = [w / h for w, h in resos] return resos, aspect_ratios if __name__ == '__main__': resos, aspect_ratios = make_bucket_resolutions((512, 768)) print(len(resos)) print(resos) print(aspect_ratios) ars = set() for ar in aspect_ratios: if ar in ars: print('error! duplicate ar:', ar) ars.add(ar) library/dreambooth_folder_creation_gui.py METASEP import gradio as gr from easygui import diropenbox, msgbox from .common_gui import get_folder_path import shutil import os def copy_info_to_Directories_tab(training_folder): img_folder = os.path.join(training_folder, 'img') if os.path.exists(os.path.join(training_folder, 'reg')): reg_folder = os.path.join(training_folder, 'reg') else: reg_folder = '' model_folder = os.path.join(training_folder, 'model') log_folder = os.path.join(training_folder, 'log') return img_folder, reg_folder, model_folder, log_folder def dreambooth_folder_preparation( util_training_images_dir_input, util_training_images_repeat_input, util_instance_prompt_input, util_regularization_images_dir_input, util_regularization_images_repeat_input, util_class_prompt_input, util_training_dir_output, ): # Check if the input variables are empty if not len(util_training_dir_output): print( "Destination training directory is missing... can't perform the required task..." 
        )
        return
    else:
        # Create the util_training_dir_output directory if it doesn't exist
        os.makedirs(util_training_dir_output, exist_ok=True)

    # Check for instance prompt
    if util_instance_prompt_input == '':
        msgbox('Instance prompt missing...')
        return

    # Check for class prompt
    if util_class_prompt_input == '':
        msgbox('Class prompt missing...')
        return

    # Create the training_dir path
    if util_training_images_dir_input == '':
        print(
            "Training images directory is missing... can't perform the required task..."
        )
        return
    else:
        training_dir = os.path.join(
            util_training_dir_output,
            f'img/{int(util_training_images_repeat_input)}_{util_instance_prompt_input} {util_class_prompt_input}',
        )

        # Remove folders if they exist
        if os.path.exists(training_dir):
            print(f'Removing existing directory {training_dir}...')
            shutil.rmtree(training_dir)

        # Copy the training images to their respective directories
        print(f'Copy {util_training_images_dir_input} to {training_dir}...')
        shutil.copytree(util_training_images_dir_input, training_dir)

    # Create the regularization_dir path (requires a regularisation folder and a positive repeat count)
    if (
        util_regularization_images_dir_input == ''
        or not util_regularization_images_repeat_input > 0
    ):
        print(
            'Regularization images directory or repeats is missing... not copying regularisation images...'
        )
    else:
        regularization_dir = os.path.join(
            util_training_dir_output,
            f'reg/{int(util_regularization_images_repeat_input)}_{util_class_prompt_input}',
        )

        # Remove folders if they exist
        if os.path.exists(regularization_dir):
            print(f'Removing existing directory {regularization_dir}...')
            shutil.rmtree(regularization_dir)

        # Copy the regularisation images to their respective directories
        print(
            f'Copy {util_regularization_images_dir_input} to {regularization_dir}...'
        )
        shutil.copytree(
            util_regularization_images_dir_input, regularization_dir
        )

    print(
        f'Done creating kohya_ss training folder structure at {util_training_dir_output}...'
    )


def gradio_dreambooth_folder_creation_tab(
    train_data_dir_input,
    reg_data_dir_input,
    output_dir_input,
    logging_dir_input,
):
    with gr.Tab('Dreambooth folder preparation'):
        gr.Markdown(
            'This utility will create the necessary folder structure for the training images and optional regularization images needed for the kohya_ss Dreambooth method to function correctly.'
) with gr.Row(): util_instance_prompt_input = gr.Textbox( label='Instance prompt', placeholder='Eg: asd', interactive=True, ) util_class_prompt_input = gr.Textbox( label='Class prompt', placeholder='Eg: person', interactive=True, ) with gr.Row(): util_training_images_dir_input = gr.Textbox( label='Training images', placeholder='Directory containing the training images', interactive=True, ) button_util_training_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_util_training_images_dir_input.click( get_folder_path, outputs=util_training_images_dir_input ) util_training_images_repeat_input = gr.Number( label='Repeats', value=40, interactive=True, elem_id='number_input', ) with gr.Row(): util_regularization_images_dir_input = gr.Textbox( label='Regularisation images', placeholder='(Optional) Directory containing the regularisation images', interactive=True, ) button_util_regularization_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_util_regularization_images_dir_input.click( get_folder_path, outputs=util_regularization_images_dir_input ) util_regularization_images_repeat_input = gr.Number( label='Repeats', value=1, interactive=True, elem_id='number_input', ) with gr.Row(): util_training_dir_output = gr.Textbox( label='Destination training directory', placeholder='Directory where formatted training and regularisation folders will be placed', interactive=True, ) button_util_training_dir_output = gr.Button( '📂', elem_id='open_folder_small' ) button_util_training_dir_output.click( get_folder_path, outputs=util_training_dir_output ) button_prepare_training_data = gr.Button('Prepare training data') button_prepare_training_data.click( dreambooth_folder_preparation, inputs=[ util_training_images_dir_input, util_training_images_repeat_input, util_instance_prompt_input, util_regularization_images_dir_input, util_regularization_images_repeat_input, util_class_prompt_input, util_training_dir_output, ], ) button_copy_info_to_Directories_tab = gr.Button( 'Copy info to Directories Tab' ) button_copy_info_to_Directories_tab.click( copy_info_to_Directories_tab, inputs=[util_training_dir_output], outputs=[ train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ], ) library/dataset_balancing_gui.py METASEP import os import re import gradio as gr from easygui import msgbox, boolbox from .common_gui import get_folder_path # def select_folder(): # # Open a file dialog to select a directory # folder = filedialog.askdirectory() # # Update the GUI to display the selected folder # selected_folder_label.config(text=folder) def dataset_balancing(concept_repeats, folder, insecure): if not concept_repeats > 0: # Display an error message if the total number of repeats is not a valid integer msgbox('Please enter a valid integer for the total number of repeats.') return concept_repeats = int(concept_repeats) # Check if folder exist if folder == '' or not os.path.isdir(folder): msgbox('Please enter a valid folder for balancing.') return pattern = re.compile(r'^\d+_.+$') # Iterate over the subdirectories in the selected folder for subdir in os.listdir(folder): if pattern.match(subdir) or insecure: # Calculate the number of repeats for the current subdirectory # Get a list of all the files in the folder files = os.listdir(os.path.join(folder, subdir)) # Filter the list to include only image files image_files = [ f for f in files if f.endswith(('.jpg', '.jpeg', '.png', '.gif', '.webp')) ] # Count the number of image files images = len(image_files) # Check if 
the subdirectory name starts with a number inside braces, # indicating that the repeats value should be multiplied match = re.match(r'^\{(\d+\.?\d*)\}', subdir) if match: # Multiply the repeats value by the number inside the braces repeats = max( 1, round(concept_repeats / images * float(match.group(1))) ) subdir = subdir[match.end() :] else: repeats = max(1, round(concept_repeats / images)) # Check if the subdirectory name already has a number at the beginning match = re.match(r'^\d+_', subdir) if match: # Replace the existing number with the new number old_name = os.path.join(folder, subdir) new_name = os.path.join( folder, f'{repeats}_{subdir[match.end():]}' ) else: # Add the new number at the beginning of the name old_name = os.path.join(folder, subdir) new_name = os.path.join(folder, f'{repeats}_{subdir}') os.rename(old_name, new_name) else: print( f'Skipping folder {subdir} because it does not match kohya_ss expected syntax...' ) msgbox('Dataset balancing completed...') def warning(insecure): if insecure: if boolbox( f'WARNING!!! You have asked to rename non kohya_ss <num>_<text> folders...\n\nAre you sure you want to do that?', choices=('Yes, I like danger', 'No, get me out of here'), ): return True else: return False def gradio_dataset_balancing_tab(): with gr.Tab('Dataset balancing'): gr.Markdown( 'This utility will ensure that each concept folder in the dataset folder is used equally during the training process of the dreambooth machine learning model, regardless of the number of images in each folder. It will do this by renaming the concept folders to indicate the number of times they should be repeated during training.' ) gr.Markdown( 'WARNING! The use of this utility on the wrong folder can lead to unexpected folder renaming!!!' ) with gr.Row(): select_dataset_folder_input = gr.Textbox( label='Dataset folder', placeholder='Folder containing the concepts folders to balance...', interactive=True, ) select_dataset_folder_button = gr.Button( '📂', elem_id='open_folder_small' ) select_dataset_folder_button.click( get_folder_path, outputs=select_dataset_folder_input ) total_repeats_number = gr.Number( value=1000, interactive=True, label='Training steps per concept per epoch', ) with gr.Accordion('Advanced options', open=False): insecure = gr.Checkbox( value=False, label='DANGER!!! 
-- Insecure folder renaming -- DANGER!!!', ) insecure.change(warning, inputs=insecure, outputs=insecure) balance_button = gr.Button('Balance dataset') balance_button.click( dataset_balancing, inputs=[ total_repeats_number, select_dataset_folder_input, insecure, ], ) library/convert_model_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess import os import shutil from .common_gui import get_folder_path, get_file_path folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 save_style_symbol = '\U0001f4be' # 💾 document_symbol = '\U0001F4C4' # 📄 def convert_model( source_model_input, source_model_type, target_model_folder_input, target_model_name_input, target_model_type, target_save_precision_type, ): # Check for caption_text_input if source_model_type == '': msgbox('Invalid source model type') return # Check if source model exist if os.path.isfile(source_model_input): print('The provided source model is a file') elif os.path.isdir(source_model_input): print('The provided model is a folder') else: msgbox('The provided source model is neither a file nor a folder') return # Check if source model exist if os.path.isdir(target_model_folder_input): print('The provided model folder exist') else: msgbox('The provided target folder does not exist') return run_cmd = f'.\\venv\Scripts\python.exe "tools/convert_diffusers20_original_sd.py"' v1_models = [ 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ] # check if v1 models if str(source_model_type) in v1_models: print('SD v1 model specified. Setting --v1 parameter') run_cmd += ' --v1' else: print('SD v2 model specified. Setting --v2 parameter') run_cmd += ' --v2' if not target_save_precision_type == 'unspecified': run_cmd += f' --{target_save_precision_type}' if ( target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): run_cmd += f' --reference_model="{source_model_type}"' if target_model_type == 'diffuser_safetensors': run_cmd += ' --use_safetensors' run_cmd += f' "{source_model_input}"' if ( target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): target_model_path = os.path.join( target_model_folder_input, target_model_name_input ) run_cmd += f' "{target_model_path}"' else: target_model_path = os.path.join( target_model_folder_input, f'{target_model_name_input}.{target_model_type}', ) run_cmd += f' "{target_model_path}"' print(run_cmd) # Run the command subprocess.run(run_cmd) if ( not target_model_type == 'diffuser' or target_model_type == 'diffuser_safetensors' ): v2_models = [ 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', ] v_parameterization = [ 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', ] if str(source_model_type) in v2_models: inference_file = os.path.join( target_model_folder_input, f'{target_model_name_input}.yaml' ) print(f'Saving v2-inference.yaml as {inference_file}') shutil.copy( f'./v2_inference/v2-inference.yaml', f'{inference_file}', ) if str(source_model_type) in v_parameterization: inference_file = os.path.join( target_model_folder_input, f'{target_model_name_input}.yaml' ) print(f'Saving v2-inference-v.yaml as {inference_file}') shutil.copy( f'./v2_inference/v2-inference-v.yaml', f'{inference_file}', ) # parser = argparse.ArgumentParser() # parser.add_argument("--v1", action='store_true', # help='load v1.x model (v1 or v2 is required to load checkpoint) / 1.xのモデルを読み込む') # parser.add_argument("--v2", action='store_true', # help='load v2.0 model (v1 or v2 is required 
to load checkpoint) / 2.0のモデルを読み込む') # parser.add_argument("--fp16", action='store_true', # help='load as fp16 (Diffusers only) and save as fp16 (checkpoint only) / fp16形式で読み込み(Diffusers形式のみ対応)、保存する(checkpointのみ対応)') # parser.add_argument("--bf16", action='store_true', help='save as bf16 (checkpoint only) / bf16形式で保存する(checkpointのみ対応)') # parser.add_argument("--float", action='store_true', # help='save as float (checkpoint only) / float(float32)形式で保存する(checkpointのみ対応)') # parser.add_argument("--epoch", type=int, default=0, help='epoch to write to checkpoint / checkpointに記録するepoch数の値') # parser.add_argument("--global_step", type=int, default=0, # help='global_step to write to checkpoint / checkpointに記録するglobal_stepの値') # parser.add_argument("--reference_model", type=str, default=None, # help="reference model for schduler/tokenizer, required in saving Diffusers, copy schduler/tokenizer from this / scheduler/tokenizerのコピー元のDiffusersモデル、Diffusers形式で保存するときに必要") # parser.add_argument("model_to_load", type=str, default=None, # help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ") # parser.add_argument("model_to_save", type=str, default=None, # help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存") ### # Gradio UI ### def gradio_convert_model_tab(): with gr.Tab('Convert model'): gr.Markdown( 'This utility can be used to convert from one stable diffusion model format to another.' ) with gr.Row(): source_model_input = gr.Textbox( label='Source model', placeholder='path to source model folder of file to convert...', interactive=True, ) button_source_model_dir = gr.Button( folder_symbol, elem_id='open_folder_small' ) button_source_model_dir.click( get_folder_path, outputs=source_model_input ) button_source_model_file = gr.Button( document_symbol, elem_id='open_folder_small' ) button_source_model_file.click( get_file_path, inputs=[source_model_input], outputs=source_model_input, ) source_model_type = gr.Dropdown( label='Source model type', choices=[ 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ], ) with gr.Row(): target_model_folder_input = gr.Textbox( label='Target model folder', placeholder='path to target model folder of file name to create...', interactive=True, ) button_target_model_folder = gr.Button( folder_symbol, elem_id='open_folder_small' ) button_target_model_folder.click( get_folder_path, outputs=target_model_folder_input ) target_model_name_input = gr.Textbox( label='Target model name', placeholder='target model name...', interactive=True, ) target_model_type = gr.Dropdown( label='Target model type', choices=[ 'diffuser', 'diffuser_safetensors', 'ckpt', 'safetensors', ], ) target_save_precision_type = gr.Dropdown( label='Target model precison', choices=['unspecified', 'fp16', 'bf16', 'float'], value='unspecified', ) convert_button = gr.Button('Convert model') convert_button.click( convert_model, inputs=[ source_model_input, source_model_type, target_model_folder_input, target_model_name_input, target_model_type, target_save_precision_type, ], ) library/common_gui.py METASEP from tkinter import filedialog, Tk import os def get_file_path(file_path='', defaultextension='.json'): current_file_path = file_path # print(f'current file path: {current_file_path}') root = 
Tk() root.wm_attributes('-topmost', 1) root.withdraw() file_path = filedialog.askopenfilename( filetypes=(('Config files', '*.json'), ('All files', '*')), defaultextension=defaultextension, ) root.destroy() if file_path == '': file_path = current_file_path return file_path def remove_doublequote(file_path): if file_path != None: file_path = file_path.replace('"', '') return file_path def get_folder_path(folder_path=''): current_folder_path = folder_path root = Tk() root.wm_attributes('-topmost', 1) root.withdraw() folder_path = filedialog.askdirectory() root.destroy() if folder_path == '': folder_path = current_folder_path return folder_path def get_saveasfile_path(file_path='', defaultextension='.json'): current_file_path = file_path # print(f'current file path: {current_file_path}') root = Tk() root.wm_attributes('-topmost', 1) root.withdraw() save_file_path = filedialog.asksaveasfile( filetypes=(('Config files', '*.json'), ('All files', '*')), defaultextension=defaultextension, ) root.destroy() # print(save_file_path) if save_file_path == None: file_path = current_file_path else: print(save_file_path.name) file_path = save_file_path.name # print(file_path) return file_path def add_pre_postfix( folder='', prefix='', postfix='', caption_file_ext='.caption' ): files = [f for f in os.listdir(folder) if f.endswith(caption_file_ext)] if not prefix == '': prefix = f'{prefix} ' if not postfix == '': postfix = f' {postfix}' for file in files: with open(os.path.join(folder, file), 'r+') as f: content = f.read() content = content.rstrip() f.seek(0, 0) f.write(f'{prefix}{content}{postfix}') f.close() library/blip_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess import os from .common_gui import get_folder_path, add_pre_postfix def caption_images( train_data_dir, caption_file_ext, batch_size, num_beams, top_p, max_length, min_length, beam_search, prefix, postfix, ): # Check for caption_text_input # if caption_text_input == "": # msgbox("Caption text is missing...") # return # Check for images_dir_input if train_data_dir == '': msgbox('Image folder is missing...') return print(f'Captioning files in {train_data_dir}...') run_cmd = f'.\\venv\\Scripts\\python.exe "./BLIP_caption/make_captions.py"' run_cmd += f' --batch_size="{int(batch_size)}"' run_cmd += f' --num_beams="{int(num_beams)}"' run_cmd += f' --top_p="{top_p}"' run_cmd += f' --max_length="{int(max_length)}"' run_cmd += f' --min_length="{int(min_length)}"' if beam_search: run_cmd += f' --beam_search' if caption_file_ext != '': run_cmd += f' --caption_extension="{caption_file_ext}"' run_cmd += f' "{train_data_dir}"' run_cmd += f' "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth"' print(run_cmd) # Run the command subprocess.run(run_cmd) # Add prefix and postfix add_pre_postfix( folder=train_data_dir, caption_file_ext=caption_file_ext, prefix=prefix, postfix=postfix, ) print('...captioning done') ### # Gradio UI ### def gradio_blip_caption_gui_tab(): with gr.Tab('BLIP Captioning'): gr.Markdown( 'This utility will use BLIP to caption files for each images in a folder.' 
) with gr.Row(): train_data_dir = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_train_data_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_train_data_dir_input.click( get_folder_path, outputs=train_data_dir ) with gr.Row(): caption_file_ext = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) prefix = gr.Textbox( label='Prefix to add to BLIP caption', placeholder='(Optional)', interactive=True, ) postfix = gr.Textbox( label='Postfix to add to BLIP caption', placeholder='(Optional)', interactive=True, ) batch_size = gr.Number( value=1, label='Batch size', interactive=True ) with gr.Row(): beam_search = gr.Checkbox( label='Use beam search', interactive=True, value=True ) num_beams = gr.Number( value=1, label='Number of beams', interactive=True ) top_p = gr.Number(value=0.9, label='Top p', interactive=True) max_length = gr.Number( value=75, label='Max length', interactive=True ) min_length = gr.Number( value=5, label='Min length', interactive=True ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[ train_data_dir, caption_file_ext, batch_size, num_beams, top_p, max_length, min_length, beam_search, prefix, postfix, ], ) library/basic_caption_gui.py METASEP import gradio as gr from easygui import msgbox import subprocess from .common_gui import get_folder_path, add_pre_postfix def caption_images( caption_text_input, images_dir_input, overwrite_input, caption_file_ext, prefix, postfix, ): # Check for images_dir_input if images_dir_input == '': msgbox('Image folder is missing...') return if not caption_text_input == '': print( f'Captioning files in {images_dir_input} with {caption_text_input}...' ) run_cmd = f'python "tools/caption.py"' run_cmd += f' --caption_text="{caption_text_input}"' if overwrite_input: run_cmd += f' --overwrite' if caption_file_ext != '': run_cmd += f' --caption_file_ext="{caption_file_ext}"' run_cmd += f' "{images_dir_input}"' print(run_cmd) # Run the command subprocess.run(run_cmd) if overwrite_input: # Add prefix and postfix add_pre_postfix( folder=images_dir_input, caption_file_ext=caption_file_ext, prefix=prefix, postfix=postfix, ) else: if not prefix == '' or not postfix == '': msgbox( 'Could not modify caption files with requested change because the "Overwrite existing captions in folder" option is not selected...' ) print('...captioning done') ### # Gradio UI ### def gradio_basic_caption_gui_tab(): with gr.Tab('Basic Captioning'): gr.Markdown( 'This utility will allow the creation of simple caption files for each images in a folder.' ) with gr.Row(): images_dir_input = gr.Textbox( label='Image folder to caption', placeholder='Directory containing the images to caption', interactive=True, ) button_images_dir_input = gr.Button( '📂', elem_id='open_folder_small' ) button_images_dir_input.click( get_folder_path, outputs=images_dir_input ) with gr.Row(): prefix = gr.Textbox( label='Prefix to add to txt caption', placeholder='(Optional)', interactive=True, ) caption_text_input = gr.Textbox( label='Caption text', placeholder='Eg: , by some artist. 
Leave empti if you just want to add pre or postfix', interactive=True, ) postfix = gr.Textbox( label='Postfix to add to txt caption', placeholder='(Optional)', interactive=True, ) with gr.Row(): overwrite_input = gr.Checkbox( label='Overwrite existing captions in folder', interactive=True, value=False, ) caption_file_ext = gr.Textbox( label='Caption file extension', placeholder='(Optional) Default: .caption', interactive=True, ) caption_button = gr.Button('Caption images') caption_button.click( caption_images, inputs=[ caption_text_input, images_dir_input, overwrite_input, caption_file_ext, prefix, postfix, ], ) library/__init__.py METASEP bitsandbytes_windows/main.py METASEP """ extract factors the build is dependent on: [X] compute capability [ ] TODO: Q - What if we have multiple GPUs of different makes? - CUDA version - Software: - CPU-only: only CPU quantization functions (no optimizer, no matrix multipl) - CuBLAS-LT: full-build 8-bit optimizer - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`) evaluation: - if paths faulty, return meaningful error - else: - determine CUDA version - determine capabilities - based on that set the default path """ import ctypes from .paths import determine_cuda_runtime_lib_path def check_cuda_result(cuda, result_val): # 3. Check for CUDA errors if result_val != 0: error_str = ctypes.c_char_p() cuda.cuGetErrorString(result_val, ctypes.byref(error_str)) print(f"CUDA exception! Error code: {error_str.value.decode()}") def get_cuda_version(cuda, cudart_path): # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION try: cudart = ctypes.CDLL(cudart_path) except OSError: # TODO: shouldn't we error or at least warn here? print(f'ERROR: libcudart.so could not be read from path: {cudart_path}!') return None version = ctypes.c_int() check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version))) version = int(version.value) major = version//1000 minor = (version-(major*1000))//10 if major < 11: print('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!') return f'{major}{minor}' def get_cuda_lib_handle(): # 1. find libcuda.so library (GPU driver) (/usr/lib) try: cuda = ctypes.CDLL("libcuda.so") except OSError: # TODO: shouldn't we error or at least warn here? print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!') return None check_cuda_result(cuda, cuda.cuInit(0)) return cuda def get_compute_capabilities(cuda): """ 1. find libcuda.so library (GPU driver) (/usr/lib) init_device -> init variables -> call function by reference 2. call extern C function to determine CC (https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html) 3. Check for CUDA errors https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549 """ nGpus = ctypes.c_int() cc_major = ctypes.c_int() cc_minor = ctypes.c_int() device = ctypes.c_int() check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus))) ccs = [] for i in range(nGpus.value): check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i)) ref_major = ctypes.byref(cc_major) ref_minor = ctypes.byref(cc_minor) # 2. 
call extern C function to determine CC check_cuda_result( cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device) ) ccs.append(f"{cc_major.value}.{cc_minor.value}") return ccs # def get_compute_capability()-> Union[List[str, ...], None]: # FIXME: error def get_compute_capability(cuda): """ Extracts the highest compute capbility from all available GPUs, as compute capabilities are downwards compatible. If no GPUs are detected, it returns None. """ ccs = get_compute_capabilities(cuda) if ccs is not None: # TODO: handle different compute capabilities; for now, take the max return ccs[-1] return None def evaluate_cuda_setup(): print('') print('='*35 + 'BUG REPORT' + '='*35) print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link') print('='*80) return "libbitsandbytes_cuda116.dll" # $$$ binary_name = "libbitsandbytes_cpu.so" #if not torch.cuda.is_available(): #print('No GPU detected. Loading CPU library...') #return binary_name cudart_path = determine_cuda_runtime_lib_path() if cudart_path is None: print( "WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!" ) return binary_name print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}") cuda = get_cuda_lib_handle() cc = get_compute_capability(cuda) print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}") cuda_version_string = get_cuda_version(cuda, cudart_path) if cc == '': print( "WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library..." ) return binary_name # 7.5 is the minimum CC vor cublaslt has_cublaslt = cc in ["7.5", "8.0", "8.6"] # TODO: # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible) # (2) Multiple CUDA versions installed # we use ls -l instead of nvcc to determine the cuda version # since most installations will have the libcudart.so installed, but not the compiler print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}') def get_binary_name(): "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so" bin_base_name = "libbitsandbytes_cuda" if has_cublaslt: return f"{bin_base_name}{cuda_version_string}.so" else: return f"{bin_base_name}{cuda_version_string}_nocublaslt.so" binary_name = get_binary_name() return binary_name bitsandbytes_windows/cextension.py METASEP import ctypes as ct from pathlib import Path from warnings import warn from .cuda_setup.main import evaluate_cuda_setup class CUDALibrary_Singleton(object): _instance = None def __init__(self): raise RuntimeError("Call get_instance() instead") def initialize(self): binary_name = evaluate_cuda_setup() package_dir = Path(__file__).parent binary_path = package_dir / binary_name if not binary_path.exists(): print(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}") legacy_binary_name = "libbitsandbytes.so" print(f"CUDA SETUP: Defaulting to {legacy_binary_name}...") binary_path = package_dir / legacy_binary_name if not binary_path.exists(): print('CUDA SETUP: CUDA detection failed. 
Either CUDA driver not installed, CUDA not installed, or you have multiple conflicting CUDA libraries!') print('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.') raise Exception('CUDA SETUP: Setup Failed!') # self.lib = ct.cdll.LoadLibrary(binary_path) self.lib = ct.cdll.LoadLibrary(str(binary_path)) # $$$ else: print(f"CUDA SETUP: Loading binary {binary_path}...") # self.lib = ct.cdll.LoadLibrary(binary_path) self.lib = ct.cdll.LoadLibrary(str(binary_path)) # $$$ @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls.__new__(cls) cls._instance.initialize() return cls._instance lib = CUDALibrary_Singleton.get_instance().lib try: lib.cadam32bit_g32 lib.get_context.restype = ct.c_void_p lib.get_cusparse.restype = ct.c_void_p COMPILED_WITH_CUDA = True except AttributeError: warn( "The installed version of bitsandbytes was compiled without GPU support. " "8-bit optimizers and GPU quantization are unavailable." ) COMPILED_WITH_CUDA = False setup.py METASEP from setuptools import setup, find_packages setup(name = "library", packages = find_packages()) dreambooth_gui.py METASEP # v1: initial release # v2: add open and save folder icons # v3: Add new Utilities tab for Dreambooth folder preparation # v3.1: Adding captionning of images to utilities import gradio as gr import json import math import os import subprocess import pathlib import shutil from library.dreambooth_folder_creation_gui import ( gradio_dreambooth_folder_creation_tab, ) from library.basic_caption_gui import gradio_basic_caption_gui_tab from library.convert_model_gui import gradio_convert_model_tab from library.blip_caption_gui import gradio_blip_caption_gui_tab from library.wd14_caption_gui import gradio_wd14_caption_gui_tab from library.dataset_balancing_gui import gradio_dataset_balancing_tab from library.common_gui import ( get_folder_path, remove_doublequote, get_file_path, get_saveasfile_path, ) from easygui import msgbox folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 save_style_symbol = '\U0001f4be' # 💾 document_symbol = '\U0001F4C4' # 📄 def save_configuration( save_as, file_path, pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): original_file_path = file_path save_as_bool = True if save_as.get('label') == 'True' else False if save_as_bool: print('Save as...') file_path = get_saveasfile_path(file_path) else: print('Save...') if file_path == None or file_path == '': file_path = get_saveasfile_path(file_path) # print(file_path) if file_path == None or file_path == '': return original_file_path # In case a file_path was provided and the user decide to cancel the open action # Return the values of the variables as a dictionary variables = { 'pretrained_model_name_or_path': pretrained_model_name_or_path, 'v2': v2, 'v_parameterization': v_parameterization, 'logging_dir': logging_dir, 'train_data_dir': train_data_dir, 'reg_data_dir': reg_data_dir, 'output_dir': output_dir, 'max_resolution': max_resolution, 'learning_rate': learning_rate, 
'lr_scheduler': lr_scheduler, 'lr_warmup': lr_warmup, 'train_batch_size': train_batch_size, 'epoch': epoch, 'save_every_n_epochs': save_every_n_epochs, 'mixed_precision': mixed_precision, 'save_precision': save_precision, 'seed': seed, 'num_cpu_threads_per_process': num_cpu_threads_per_process, 'cache_latent': cache_latent, 'caption_extention': caption_extention, 'enable_bucket': enable_bucket, 'gradient_checkpointing': gradient_checkpointing, 'full_fp16': full_fp16, 'no_token_padding': no_token_padding, 'stop_text_encoder_training': stop_text_encoder_training, 'use_8bit_adam': use_8bit_adam, 'xformers': xformers, 'save_model_as': save_model_as, 'shuffle_caption': shuffle_caption, 'save_state': save_state, 'resume': resume, 'prior_loss_weight': prior_loss_weight, } # Save the data to the selected file with open(file_path, 'w') as file: json.dump(variables, file) return file_path def open_configuration( file_path, pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): original_file_path = file_path file_path = get_file_path(file_path) # print(file_path) if not file_path == '' and not file_path == None: # load variables from JSON file with open(file_path, 'r') as f: my_data = json.load(f) else: file_path = original_file_path # In case a file_path was provided and the user decide to cancel the open action my_data = {} # Return the values of the variables as a dictionary return ( file_path, my_data.get( 'pretrained_model_name_or_path', pretrained_model_name_or_path ), my_data.get('v2', v2), my_data.get('v_parameterization', v_parameterization), my_data.get('logging_dir', logging_dir), my_data.get('train_data_dir', train_data_dir), my_data.get('reg_data_dir', reg_data_dir), my_data.get('output_dir', output_dir), my_data.get('max_resolution', max_resolution), my_data.get('learning_rate', learning_rate), my_data.get('lr_scheduler', lr_scheduler), my_data.get('lr_warmup', lr_warmup), my_data.get('train_batch_size', train_batch_size), my_data.get('epoch', epoch), my_data.get('save_every_n_epochs', save_every_n_epochs), my_data.get('mixed_precision', mixed_precision), my_data.get('save_precision', save_precision), my_data.get('seed', seed), my_data.get( 'num_cpu_threads_per_process', num_cpu_threads_per_process ), my_data.get('cache_latent', cache_latent), my_data.get('caption_extention', caption_extention), my_data.get('enable_bucket', enable_bucket), my_data.get('gradient_checkpointing', gradient_checkpointing), my_data.get('full_fp16', full_fp16), my_data.get('no_token_padding', no_token_padding), my_data.get('stop_text_encoder_training', stop_text_encoder_training), my_data.get('use_8bit_adam', use_8bit_adam), my_data.get('xformers', xformers), my_data.get('save_model_as', save_model_as), my_data.get('shuffle_caption', shuffle_caption), my_data.get('save_state', save_state), my_data.get('resume', resume), my_data.get('prior_loss_weight', prior_loss_weight), ) def train_model( pretrained_model_name_or_path, v2, v_parameterization, logging_dir, train_data_dir, reg_data_dir, output_dir, max_resolution, learning_rate, lr_scheduler, lr_warmup, 
train_batch_size, epoch, save_every_n_epochs, mixed_precision, save_precision, seed, num_cpu_threads_per_process, cache_latent, caption_extention, enable_bucket, gradient_checkpointing, full_fp16, no_token_padding, stop_text_encoder_training_pct, use_8bit_adam, xformers, save_model_as, shuffle_caption, save_state, resume, prior_loss_weight, ): def save_inference_file(output_dir, v2, v_parameterization): # Copy inference model for v2 if required if v2 and v_parameterization: print(f'Saving v2-inference-v.yaml as {output_dir}/last.yaml') shutil.copy( f'./v2_inference/v2-inference-v.yaml', f'{output_dir}/last.yaml', ) elif v2: print(f'Saving v2-inference.yaml as {output_dir}/last.yaml') shutil.copy( f'./v2_inference/v2-inference.yaml', f'{output_dir}/last.yaml', ) if pretrained_model_name_or_path == '': msgbox('Source model information is missing') return if train_data_dir == '': msgbox('Image folder path is missing') return if not os.path.exists(train_data_dir): msgbox('Image folder does not exist') return if reg_data_dir != '': if not os.path.exists(reg_data_dir): msgbox('Regularisation folder does not exist') return if output_dir == '': msgbox('Output folder path is missing') return # Get a list of all subfolders in train_data_dir subfolders = [ f for f in os.listdir(train_data_dir) if os.path.isdir(os.path.join(train_data_dir, f)) ] total_steps = 0 # Loop through each subfolder and extract the number of repeats for folder in subfolders: # Extract the number of repeats from the folder name repeats = int(folder.split('_')[0]) # Count the number of images in the folder num_images = len( [ f for f in os.listdir(os.path.join(train_data_dir, folder)) if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png') or f.endswith('.webp') ] ) # Calculate the total number of steps for this folder steps = repeats * num_images total_steps += steps # Print the result print(f'Folder {folder}: {steps} steps') # Print the result # print(f"{total_steps} total steps") if reg_data_dir == '': reg_factor = 1 else: print( 'Regularisation images are used... Will double the number of steps required...' 
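            # Example with assumed numbers: 40 images x 100 repeats = 4000 steps;
            # with train_batch_size=2, epoch=1 and regularisation images enabled,
            # max_train_steps below becomes ceil(4000 / 2 * 1 * 2) = 4000.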
    )
    reg_factor = 2

    # calculate max_train_steps
    max_train_steps = int(
        math.ceil(
            float(total_steps)
            / int(train_batch_size)
            * int(epoch)
            * int(reg_factor)
        )
    )
    print(f'max_train_steps = {max_train_steps}')

    # calculate stop encoder training
    if stop_text_encoder_training_pct == None:
        stop_text_encoder_training = 0
    else:
        stop_text_encoder_training = math.ceil(
            float(max_train_steps) / 100 * int(stop_text_encoder_training_pct)
        )
    print(f'stop_text_encoder_training = {stop_text_encoder_training}')

    lr_warmup_steps = round(float(int(lr_warmup) * int(max_train_steps) / 100))
    print(f'lr_warmup_steps = {lr_warmup_steps}')

    run_cmd = f'accelerate launch --num_cpu_threads_per_process={num_cpu_threads_per_process} "train_db_fixed.py"'
    if v2:
        run_cmd += ' --v2'
    if v_parameterization:
        run_cmd += ' --v_parameterization'
    if cache_latent:
        run_cmd += ' --cache_latents'
    if enable_bucket:
        run_cmd += ' --enable_bucket'
    if gradient_checkpointing:
        run_cmd += ' --gradient_checkpointing'
    if full_fp16:
        run_cmd += ' --full_fp16'
    if no_token_padding:
        run_cmd += ' --no_token_padding'
    if use_8bit_adam:
        run_cmd += ' --use_8bit_adam'
    if xformers:
        run_cmd += ' --xformers'
    if shuffle_caption:
        run_cmd += ' --shuffle_caption'
    if save_state:
        run_cmd += ' --save_state'
    run_cmd += (
        f' --pretrained_model_name_or_path={pretrained_model_name_or_path}'
    )
    run_cmd += f' --train_data_dir="{train_data_dir}"'
    if len(reg_data_dir):
        run_cmd += f' --reg_data_dir="{reg_data_dir}"'
    run_cmd += f' --resolution={max_resolution}'
    run_cmd += f' --output_dir={output_dir}'
    run_cmd += f' --train_batch_size={train_batch_size}'
    run_cmd += f' --learning_rate={learning_rate}'
    run_cmd += f' --lr_scheduler={lr_scheduler}'
    run_cmd += f' --lr_warmup_steps={lr_warmup_steps}'
    run_cmd += f' --max_train_steps={max_train_steps}'
    run_cmd += f' --mixed_precision={mixed_precision}'
    run_cmd += f' --save_every_n_epochs={save_every_n_epochs}'
    run_cmd += f' --seed={seed}'
    run_cmd += f' --save_precision={save_precision}'
    run_cmd += f' --logging_dir={logging_dir}'
    run_cmd += f' --caption_extention={caption_extention}'
    if not stop_text_encoder_training == 0:
        run_cmd += (
            f' --stop_text_encoder_training={stop_text_encoder_training}'
        )
    if not save_model_as == 'same as source model':
        run_cmd += f' --save_model_as={save_model_as}'
    if not resume == '':
        run_cmd += f' --resume={resume}'
    if not float(prior_loss_weight) == 1.0:
        run_cmd += f' --prior_loss_weight={prior_loss_weight}'

    print(run_cmd)
    # Run the command
    subprocess.run(run_cmd)

    # check if output_dir/last is a folder... therefore it is a diffuser model
    last_dir = pathlib.Path(f'{output_dir}/last')
    if not last_dir.is_dir():
        # Copy inference model for v2 if required
        save_inference_file(output_dir, v2, v_parameterization)


def set_pretrained_model_name_or_path_input(value, v2, v_parameterization):
    # define a list of substrings to search for
    substrings_v2 = [
        'stabilityai/stable-diffusion-2-1-base',
        'stabilityai/stable-diffusion-2-base',
    ]

    # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list
    if str(value) in substrings_v2:
        print('SD v2 model detected.
Setting --v2 parameter') v2 = True v_parameterization = False return value, v2, v_parameterization # define a list of substrings to search for v-objective substrings_v_parameterization = [ 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', ] # check if $v2 and $v_parameterization are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_parameterization list if str(value) in substrings_v_parameterization: print( 'SD v2 v_parameterization detected. Setting --v2 parameter and --v_parameterization' ) v2 = True v_parameterization = True return value, v2, v_parameterization # define a list of substrings to v1.x substrings_v1_model = [ 'CompVis/stable-diffusion-v1-4', 'runwayml/stable-diffusion-v1-5', ] if str(value) in substrings_v1_model: v2 = False v_parameterization = False return value, v2, v_parameterization if value == 'custom': value = '' v2 = False v_parameterization = False return value, v2, v_parameterization css = '' if os.path.exists('./style.css'): with open(os.path.join('./style.css'), 'r', encoding='utf8') as file: print('Load CSS...') css += file.read() + '\n' interface = gr.Blocks(css=css) with interface: dummy_true = gr.Label(value=True, visible=False) dummy_false = gr.Label(value=False, visible=False) with gr.Tab('Dreambooth'): gr.Markdown('Enter kohya finetuner parameter using this interface.') with gr.Accordion('Configuration file', open=False): with gr.Row(): button_open_config = gr.Button('Open 📂', elem_id='open_folder') button_save_config = gr.Button('Save 💾', elem_id='open_folder') button_save_as_config = gr.Button( 'Save as... 💾', elem_id='open_folder' ) config_file_name = gr.Textbox( label='', placeholder="type the configuration file path or use the 'Open' button above to select it...", interactive=True, ) # config_file_name.change( # remove_doublequote, # inputs=[config_file_name], # outputs=[config_file_name], # ) with gr.Tab('Source model'): # Define the input elements with gr.Row(): pretrained_model_name_or_path_input = gr.Textbox( label='Pretrained model name or path', placeholder='enter the path to custom model or name of pretrained model', ) pretrained_model_name_or_path_fille = gr.Button( document_symbol, elem_id='open_folder_small' ) pretrained_model_name_or_path_fille.click( get_file_path, inputs=[pretrained_model_name_or_path_input], outputs=pretrained_model_name_or_path_input, ) pretrained_model_name_or_path_folder = gr.Button( folder_symbol, elem_id='open_folder_small' ) pretrained_model_name_or_path_folder.click( get_folder_path, outputs=pretrained_model_name_or_path_input, ) model_list = gr.Dropdown( label='(Optional) Model Quick Pick', choices=[ 'custom', 'stabilityai/stable-diffusion-2-1-base', 'stabilityai/stable-diffusion-2-base', 'stabilityai/stable-diffusion-2-1', 'stabilityai/stable-diffusion-2', 'runwayml/stable-diffusion-v1-5', 'CompVis/stable-diffusion-v1-4', ], ) save_model_as_dropdown = gr.Dropdown( label='Save trained model as', choices=[ 'same as source model', 'ckpt', 'diffusers', 'diffusers_safetensors', 'safetensors', ], value='same as source model', ) with gr.Row(): v2_input = gr.Checkbox(label='v2', value=True) v_parameterization_input = gr.Checkbox( label='v_parameterization', value=False ) pretrained_model_name_or_path_input.change( remove_doublequote, inputs=[pretrained_model_name_or_path_input], outputs=[pretrained_model_name_or_path_input], ) model_list.change( set_pretrained_model_name_or_path_input, inputs=[model_list, v2_input, v_parameterization_input], outputs=[ 
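                # set_pretrained_model_name_or_path_input returns
                # (value, v2, v_parameterization), so the three output
                # components listed below must stay in that same order.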
pretrained_model_name_or_path_input, v2_input, v_parameterization_input, ], ) with gr.Tab('Directories'): with gr.Row(): train_data_dir_input = gr.Textbox( label='Image folder', placeholder='Folder where the training folders containing the images are located', ) train_data_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) train_data_dir_input_folder.click( get_folder_path, outputs=train_data_dir_input ) reg_data_dir_input = gr.Textbox( label='Regularisation folder', placeholder='(Optional) Folder where where the regularization folders containing the images are located', ) reg_data_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) reg_data_dir_input_folder.click( get_folder_path, outputs=reg_data_dir_input ) with gr.Row(): output_dir_input = gr.Textbox( label='Output folder', placeholder='Folder to output trained model', ) output_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) output_dir_input_folder.click( get_folder_path, outputs=output_dir_input ) logging_dir_input = gr.Textbox( label='Logging folder', placeholder='Optional: enable logging and output TensorBoard log to this folder', ) logging_dir_input_folder = gr.Button( '📂', elem_id='open_folder_small' ) logging_dir_input_folder.click( get_folder_path, outputs=logging_dir_input ) train_data_dir_input.change( remove_doublequote, inputs=[train_data_dir_input], outputs=[train_data_dir_input], ) reg_data_dir_input.change( remove_doublequote, inputs=[reg_data_dir_input], outputs=[reg_data_dir_input], ) output_dir_input.change( remove_doublequote, inputs=[output_dir_input], outputs=[output_dir_input], ) logging_dir_input.change( remove_doublequote, inputs=[logging_dir_input], outputs=[logging_dir_input], ) with gr.Tab('Training parameters'): with gr.Row(): learning_rate_input = gr.Textbox( label='Learning rate', value=1e-6 ) lr_scheduler_input = gr.Dropdown( label='LR Scheduler', choices=[ 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'linear', 'polynomial', ], value='constant', ) lr_warmup_input = gr.Textbox(label='LR warmup', value=0) with gr.Row(): train_batch_size_input = gr.Slider( minimum=1, maximum=32, label='Train batch size', value=1, step=1, ) epoch_input = gr.Textbox(label='Epoch', value=1) save_every_n_epochs_input = gr.Textbox( label='Save every N epochs', value=1 ) with gr.Row(): mixed_precision_input = gr.Dropdown( label='Mixed precision', choices=[ 'no', 'fp16', 'bf16', ], value='fp16', ) save_precision_input = gr.Dropdown( label='Save precision', choices=[ 'float', 'fp16', 'bf16', ], value='fp16', ) num_cpu_threads_per_process_input = gr.Slider( minimum=1, maximum=os.cpu_count(), step=1, label='Number of CPU threads per process', value=os.cpu_count(), ) with gr.Row(): seed_input = gr.Textbox(label='Seed', value=1234) max_resolution_input = gr.Textbox( label='Max resolution', value='512,512', placeholder='512,512', ) with gr.Row(): caption_extention_input = gr.Textbox( label='Caption Extension', placeholder='(Optional) Extension for caption files. 
default: .caption', ) stop_text_encoder_training_input = gr.Slider( minimum=0, maximum=100, value=0, step=1, label='Stop text encoder training', ) with gr.Row(): enable_bucket_input = gr.Checkbox( label='Enable buckets', value=True ) cache_latent_input = gr.Checkbox( label='Cache latent', value=True ) use_8bit_adam_input = gr.Checkbox( label='Use 8bit adam', value=True ) xformers_input = gr.Checkbox(label='Use xformers', value=True) with gr.Accordion('Advanced Configuration', open=False): with gr.Row(): full_fp16_input = gr.Checkbox( label='Full fp16 training (experimental)', value=False ) no_token_padding_input = gr.Checkbox( label='No token padding', value=False ) gradient_checkpointing_input = gr.Checkbox( label='Gradient checkpointing', value=False ) shuffle_caption = gr.Checkbox( label='Shuffle caption', value=False ) save_state = gr.Checkbox(label='Save state', value=False) with gr.Row(): resume = gr.Textbox( label='Resume', placeholder='path to "last-state" state folder to resume from', ) resume_button = gr.Button('📂', elem_id='open_folder_small') resume_button.click(get_folder_path, outputs=resume) prior_loss_weight = gr.Number( label='Prior loss weight', value=1.0 ) button_run = gr.Button('Train model') with gr.Tab('Utilities'): with gr.Tab('Captioning'): gradio_basic_caption_gui_tab() gradio_blip_caption_gui_tab() gradio_wd14_caption_gui_tab() gradio_dreambooth_folder_creation_tab( train_data_dir_input, reg_data_dir_input, output_dir_input, logging_dir_input, ) gradio_dataset_balancing_tab() gradio_convert_model_tab() button_open_config.click( open_configuration, inputs=[ config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[ config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], ) save_as = True not_save_as = False button_save_config.click( save_configuration, inputs=[ dummy_false, config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, 
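            # the remaining inputs continue below; passing dummy_false as the
            # first input makes save_configuration perform a plain 'Save'
            # (it only prompts for a path when none has been set yet).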
caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[config_file_name], ) button_save_as_config.click( save_configuration, inputs=[ dummy_true, config_file_name, pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], outputs=[config_file_name], ) button_run.click( train_model, inputs=[ pretrained_model_name_or_path_input, v2_input, v_parameterization_input, logging_dir_input, train_data_dir_input, reg_data_dir_input, output_dir_input, max_resolution_input, learning_rate_input, lr_scheduler_input, lr_warmup_input, train_batch_size_input, epoch_input, save_every_n_epochs_input, mixed_precision_input, save_precision_input, seed_input, num_cpu_threads_per_process_input, cache_latent_input, caption_extention_input, enable_bucket_input, gradient_checkpointing_input, full_fp16_input, no_token_padding_input, stop_text_encoder_training_input, use_8bit_adam_input, xformers_input, save_model_as_dropdown, shuffle_caption, save_state, resume, prior_loss_weight, ], ) # Show the interface interface.launch() fine_tune.py METASEP
[ { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of 
buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n 
self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, 
padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no 
metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk 
through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / 
full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, 
tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = 
{\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n 
all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_fp16\", action=\"store_true\", help=\"fp16 training including gradients / 勾配も含めてfp16で学習する\")\n parser.add_argument(\"--clip_skip\", type=int, default=None,\n help=\"use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)\")\n parser.add_argument(\"--debug_dataset\", action=\"store_true\",\n help=\"show images for debugging (do not train) / デバッグ用に学習データを画面表示する(学習は行わない)\")\n parser.add_argument(\"--logging_dir\", type=str, default=None,\n help=\"enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in 
metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = 
input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, 
subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if 
hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n 
text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, 
return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 
0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n 
mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
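# NOTE: illustrative sketch, not part of the original fine-tuning script.
# A minimal, standalone illustration of how FineTuningDataset.__getitem__ above
# splits a caption tokenized to more than 75 tokens into 77-token windows that each
# keep the original BOS as the first id and the original EOS/PAD as the last id
# (the v1 branch; the v2 branch additionally patches trailing EOS/PAD positions).
# All ids below are dummies except 49406/49407, the CLIP BOS/EOS ids; the helper
# name chunk_long_ids is ours and does not exist in the script.

def chunk_long_ids(input_ids, model_max_length=77):
    """Split an id sequence of length N*75 + 2 into N windows of 77 ids."""
    bos, eos = input_ids[0], input_ids[-1]
    chunks = []
    for i in range(1, len(input_ids) - model_max_length + 2, model_max_length - 2):
        chunks.append([bos] + list(input_ids[i:i + model_max_length - 2]) + [eos])
    return chunks

if __name__ == "__main__":
    # 227 = max_token_length 225 + 2, matching tokenizer_max_length in the dataset
    dummy_ids = [49406] + list(range(1, 226)) + [49407]
    chunks = chunk_long_ids(dummy_ids)
    print(len(chunks), [len(c) for c in chunks])  # -> 3 [77, 77, 77]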
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
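# ----------------------------------------------------------------------------
# Illustrative sketch (not from the original file): the core "online softmax"
# update behind the FlashAttention-style forward pass above. Running row
# maxima / row sums start at -inf / 0, and both the accumulator and the sums
# are rescaled whenever a new key/value block raises the running maximum.
import torch

def online_softmax_update(row_max, row_sum, acc, block_scores, block_values):
    # row_max, row_sum: (rows, 1); acc: (rows, dim)
    # block_scores: (rows, block_cols); block_values: (block_cols, dim)
    block_max = block_scores.amax(dim=-1, keepdim=True)
    new_max = torch.maximum(row_max, block_max)
    exp_scores = torch.exp(block_scores - new_max)
    block_sum = exp_scores.sum(dim=-1, keepdim=True)
    scale_old = torch.exp(row_max - new_max)
    new_sum = scale_old * row_sum + block_sum
    new_acc = acc * (scale_old * row_sum / new_sum) + (exp_scores @ block_values) / new_sum
    return new_max, new_sum, new_acc
# ----------------------------------------------------------------------------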
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
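# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not part of the original script):
# a plain reference attention in the same (b, h, n, d) layout used by the
# monkey-patched forward_flash_attn above, handy for spot-checking the
# replaced CrossAttention.forward on small random tensors.
import torch
from einops import rearrange

def reference_attention(q, k, v):
    scale = q.shape[-1] ** -0.5
    attn = torch.softmax(torch.einsum('b h i d, b h j d -> b h i j', q, k) * scale, dim=-1)
    out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
    return rearrange(out, 'b h n d -> b n (h d)')
# ----------------------------------------------------------------------------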
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make 
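# ----------------------------------------------------------------------------
# Illustrative sketch: the dataset above assembles whole batches itself, so the
# DataLoader runs with batch_size=1 and a collate_fn that merely unwraps the
# single pre-batched example. PreBatchedDataset is a hypothetical stand-in used
# only to demonstrate the pattern.
import torch

class PreBatchedDataset(torch.utils.data.Dataset):
    def __init__(self, batches):
        self.batches = batches           # each item is already a full batch dict

    def __len__(self):
        return len(self.batches)

    def __getitem__(self, index):
        return self.batches[index]

def _demo_prebatched_loader():
    loader = torch.utils.data.DataLoader(
        PreBatchedDataset([{"latents": torch.zeros(4, 4, 64, 64)}]),
        batch_size=1, shuffle=False, collate_fn=lambda examples: examples[0])
    return next(iter(loader))            # the batch dict itself, not a list of dicts
# ----------------------------------------------------------------------------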
buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if 
self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if 
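# ----------------------------------------------------------------------------
# Illustrative sketch of the v1 chunking above: a long encode of length
# max_token_length + 2 (e.g. 227 for the 225 setting) is split into 77-token
# windows that each reuse the original first and last token. The helper name
# is hypothetical.
import torch

def split_long_input_ids(input_ids: torch.Tensor, tokenizer_max_length: int,
                         model_max_length: int = 77) -> torch.Tensor:
    # input_ids: 1-D tensor of length tokenizer_max_length
    chunks = []
    for i in range(1, tokenizer_max_length - model_max_length + 2, model_max_length - 2):
        chunks.append(torch.cat((input_ids[0].unsqueeze(0),              # <BOS>
                                 input_ids[i:i + model_max_length - 2],  # body
                                 input_ids[-1].unsqueeze(0))))           # <EOS> / <PAD>
    return torch.stack(chunks)                                           # (n_chunks, 77)
# ----------------------------------------------------------------------------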
args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # 
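# ----------------------------------------------------------------------------
# Illustrative sketch (hypothetical factory, not from the original file): the
# same accelerate compatibility probe as above, packaged as a closure -- newer
# accelerate versions accept a second positional argument to unwrap_model,
# older ones raise TypeError.
def make_unwrap_fn(accelerator):
    try:
        accelerator.unwrap_model("dummy", True)
        newer = True
    except TypeError:
        newer = False

    def unwrap(model):
        return accelerator.unwrap_model(model, True) if newer else accelerator.unwrap_model(model)

    return unwrap
# ----------------------------------------------------------------------------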
gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 
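# ----------------------------------------------------------------------------
# Illustrative sketch mirroring the optimizer selection above: prefer
# bitsandbytes' 8-bit AdamW when requested and installed, otherwise fall back
# to torch.optim.AdamW. The helper name is hypothetical.
import torch

def pick_optimizer(params, lr, use_8bit_adam=False):
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb   # optional dependency
        except ImportError as e:
            raise ImportError("bitsandbytes is not installed") from e
        return bnb.optim.AdamW8bit(params, lr=lr)
    return torch.optim.AdamW(params, lr=lr)
# ----------------------------------------------------------------------------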
実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n 
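# ----------------------------------------------------------------------------
# Illustrative sketch of the step/epoch bookkeeping above: the epoch count is
# derived from the requested optimizer-step budget and the number of update
# steps one pass over the dataloader yields. Hypothetical helper for clarity.
import math

def compute_num_train_epochs(num_batches: int, gradient_accumulation_steps: int,
                             max_train_steps: int) -> int:
    updates_per_epoch = math.ceil(num_batches / gradient_accumulation_steps)
    return math.ceil(max_train_steps / updates_per_epoch)
# ----------------------------------------------------------------------------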
else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if 
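# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): how the training
# target differs between epsilon- and v-parameterization, using diffusers'
# DDPMScheduler with the same betas as above. Tensor shapes are invented for
# the example.
import torch
from diffusers import DDPMScheduler

def make_training_target(v_parameterization: bool = True):
    scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012,
                              beta_schedule="scaled_linear",
                              num_train_timesteps=1000, clip_sample=False)
    latents = torch.randn(2, 4, 64, 64)            # dummy VAE latents
    noise = torch.randn_like(latents)
    timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (2,)).long()

    noisy_latents = scheduler.add_noise(latents, noise, timesteps)
    if v_parameterization:
        target = scheduler.get_velocity(latents, noise, timesteps)  # v-prediction (diffusers >= 0.10)
    else:
        target = noise                                              # plain epsilon-prediction
    return noisy_latents, target
# ----------------------------------------------------------------------------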
args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, 
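# ----------------------------------------------------------------------------
# Illustrative sketch of the per-epoch checkpoint condition above: save at
# every Nth epoch but skip the final one, since the last model is written
# separately after training finishes. Hypothetical helper name.
def should_save_epoch_checkpoint(epoch_index: int, save_every_n_epochs, num_train_epochs: int) -> bool:
    if save_every_n_epochs is None:
        return False
    e = epoch_index + 1
    return e % save_every_n_epochs == 0 and e < num_train_epochs
# ----------------------------------------------------------------------------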
dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n 
self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, 
padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no 
metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk 
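# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The weight/save dtype selection above is a plain string-to-dtype mapping; a compact
# equivalent, assuming the usual accelerate values "no"/"fp16"/"bf16" for
# --mixed_precision and None/"float"/"fp16"/"bf16" for --save_precision.
import torch

MIXED_PRECISION_DTYPES = {"no": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}
SAVE_DTYPES = {None: None, "float": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}

def resolve_dtypes(mixed_precision, save_precision):
    weight_dtype = MIXED_PRECISION_DTYPES.get(mixed_precision, torch.float32)
    save_dtype = SAVE_DTYPES.get(save_precision)
    return weight_dtype, save_dtype

print(resolve_dtypes("fp16", "bf16"))  # (torch.float16, torch.bfloat16)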
through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / 
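# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The optimizer selection above in isolation: use bitsandbytes' AdamW8bit when
# --use_8bit_adam is given, otherwise fall back to torch.optim.AdamW.  The model and
# learning rate below are hypothetical.
import torch

def build_optimizer(params, lr, use_8bit_adam):
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError as e:
            raise ImportError("bitsandbytes is required for --use_8bit_adam") from e
        return bnb.optim.AdamW8bit(params, lr=lr)
    return torch.optim.AdamW(params, lr=lr)

dummy_model = torch.nn.Linear(4, 4)
optimizer = build_optimizer(dummy_model.parameters(), lr=1e-5, use_8bit_adam=False)
print(type(optimizer).__name__)  # AdamW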
full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, 
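# --- Illustrative sketch (added by the editor, not part of the original script) ---
# How the epoch count above follows from --max_train_steps: one optimizer update
# consumes gradient_accumulation_steps batches, so epochs = ceil(steps / updates per
# epoch).  The numbers below are hypothetical.
import math

def compute_num_epochs(batches_per_epoch, gradient_accumulation_steps, max_train_steps):
    updates_per_epoch = math.ceil(batches_per_epoch / gradient_accumulation_steps)
    return math.ceil(max_train_steps / updates_per_epoch)

# 1000 batches/epoch with accumulation 4 -> 250 updates/epoch; 1600 steps -> 7 epochs
print(compute_num_epochs(1000, 4, 1600))  # 7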
tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = 
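# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The loss target above: plain epsilon prediction regresses the sampled noise itself,
# while --v_parameterization asks the scheduler for the velocity target instead
# (Diffusers >= 0.10).  Tensor shapes below are hypothetical.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                          num_train_timesteps=1000, clip_sample=False)
latents = torch.randn(2, 4, 64, 64)
noise = torch.randn_like(latents)
timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (2,)).long()

v_parameterization = True
target = scheduler.get_velocity(latents, noise, timesteps) if v_parameterization else noise
noisy_latents = scheduler.add_noise(latents, noise, timesteps)
print(noisy_latents.shape, target.shape)  # both torch.Size([2, 4, 64, 64])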
{\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n 
all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
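# --- Illustrative sketch (added by the editor, not part of the original script) ---
# What the patched CrossAttention.forward above does with the heads: reshape q/k/v to
# (batch, tokens, heads, head_dim), run xformers' memory-efficient attention, then merge
# the heads back.  A plain einsum fallback is included so the sketch runs without
# xformers; sizes below are hypothetical.
import torch
from einops import rearrange

def toy_attention(q_in, k_in, v_in, heads):
    q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=heads).contiguous() for t in (q_in, k_in, v_in))
    try:
        import xformers.ops
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
    except ImportError:
        scale = q.shape[-1] ** -0.5
        attn = torch.softmax(torch.einsum('bnhd,bmhd->bhnm', q, k) * scale, dim=-1)
        out = torch.einsum('bhnm,bmhd->bnhd', attn, v)
    return rearrange(out, 'b n h d -> b n (h d)')

x = torch.randn(1, 16, 8 * 40)  # 16 tokens, 8 heads of dim 40 (hypothetical)
print(toy_attention(x, x, x, heads=8).shape)  # torch.Size([1, 16, 320])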
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, 
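# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The flip augmentation above is file based: for each image key a pre-encoded
# "<key>_flip.npz" latent may sit next to "<key>.npz", and it is chosen with
# probability 0.5 when present.  The path below is hypothetical.
import os
import random

def pick_latent_file(base_path_without_ext):
    normal = base_path_without_ext + '.npz'
    flipped = base_path_without_ext + '_flip.npz'
    if random.random() < 0.5 and os.path.exists(flipped):
        return flipped
    return normal

print(pick_latent_file('/tmp/dataset/img_0001'))  # hypothetical path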
shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if 
self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
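# --- Illustrative sketch (added by the editor, not part of the original script) ---
# The chunking above for --max_token_length=225: the tokenizer returns 227 ids
# ("<BOS> ... <EOS>"), and the loop re-packs them into three 77-token chunks, each
# wrapped in its own <BOS>/<EOS> (v2 additionally patches trailing <PAD>/<EOS>
# positions as shown above).  The ids below are placeholders; only the slicing
# arithmetic matters.
import torch

def split_into_chunks(input_ids, model_max_length=77):
    bos, eos = input_ids[0].unsqueeze(0), input_ids[-1].unsqueeze(0)
    chunks = []
    for i in range(1, len(input_ids) - model_max_length + 2, model_max_length - 2):
        chunks.append(torch.cat([bos, input_ids[i:i + model_max_length - 2], eos]))
    return torch.stack(chunks)

demo_ids = torch.arange(227)              # stands in for "<BOS> t1 ... t225 <EOS>"
print(split_into_chunks(demo_ids).shape)  # torch.Size([3, 77])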
に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. 
/ 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", 
args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = 
accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass 
FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / 
キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. 
/ 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", 
args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = 
accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_fp16\", action=\"store_true\", help=\"fp16 training including gradients / 勾配も含めてfp16で学習する\")\n parser.add_argument(\"--clip_skip\", type=int, default=None,\n help=\"use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)\")\n parser.add_argument(\"--debug_dataset\", action=\"store_true\",\n help=\"show images for debugging (do not train) / デバッグ用に学習データを画面表示する(学習は行わない)\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) / optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")
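# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources above): the long-caption
# handling in FineTuningDataset.__getitem__ packs a caption tokenized to
# tokenizer_max_length ids (e.g. 227) into several model_max_length (77) id
# chunks, each re-wrapped with the BOS and EOS ids, before stacking them.
# The standalone helper below mirrors only that slicing arithmetic for the
# simple v1-style case; the function name and the dummy ids are made up for
# this example and are not used by the training script itself.
def split_long_caption(input_ids, model_max_length=77, tokenizer_max_length=227):
    bos, eos = input_ids[0], input_ids[-1]
    chunks = []
    # same stride as the dataset code: range(1, 152, 75) for 227/77
    for i in range(1, tokenizer_max_length - model_max_length + 2, model_max_length - 2):
        chunks.append([bos] + list(input_ids[i:i + model_max_length - 2]) + [eos])
    return chunks


dummy_ids = [49406] + list(range(225)) + [49407]  # BOS, 225 placeholder ids, EOS
for chunk in split_long_caption(dummy_ids):
    assert len(chunk) == 77  # three 77-token windows are produced from 227 ids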
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
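# A small worked example of the epoch arithmetic above (not used by the script itself):
# one optimizer update consumes gradient_accumulation_steps batches, and the epoch count
# is derived from max_train_steps. The concrete numbers below are hypothetical.
import math
_batches_per_epoch = 1000   # len(train_dataloader), assumed for illustration
_grad_accum_steps = 2       # args.gradient_accumulation_steps, assumed
_max_train_steps = 3000     # args.max_train_steps, assumed
_updates_per_epoch = math.ceil(_batches_per_epoch / _grad_accum_steps)   # 500
_num_train_epochs = math.ceil(_max_train_steps / _updates_per_epoch)     # 6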
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
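# A standalone illustration of how the (bucket_index, batch_index) pairs built above
# enumerate batches (not used by the script itself); the bucket sizes and batch_size
# below are hypothetical.
import math
_bucket_sizes = [5, 3]      # e.g. two resolution buckets
_batch_size = 2
_buckets_indices = [(b, i) for b, n in enumerate(_bucket_sizes)
                    for i in range(math.ceil(n / _batch_size))]
# _buckets_indices == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
# __getitem__ slices bucket[batch_index * batch_size : batch_index * batch_size + batch_size],
# so the last batch of an odd-sized bucket is simply smaller than batch_size.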
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
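# For reference only (not used by the script itself): with --v_parameterization the target
# above comes from DDPMScheduler.get_velocity which, to the best of this sketch's
# understanding, implements the v-prediction objective of Salimans & Ho:
#   v_t = sqrt(alpha_bar_t) * epsilon - sqrt(1 - alpha_bar_t) * x_0
# where alpha_bar_t is the cumulative alpha product at timestep t.
import torch
def _sketch_velocity(x0: torch.Tensor, noise: torch.Tensor, alpha_bar_t: torch.Tensor) -> torch.Tensor:
    return alpha_bar_t.sqrt() * noise - (1.0 - alpha_bar_t).sqrt() * x0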
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
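# FlashAttentionFunction above re-implements the tiled, online-softmax attention of the
# FlashAttention paper (https://arxiv.org/abs/2205.14135) in plain PyTorch so it can run
# without xformers. Purely as a point of comparison, and not what this script uses,
# PyTorch 2.x ships a fused memory-efficient kernel behind
# torch.nn.functional.scaled_dot_product_attention:
import torch
import torch.nn.functional as F

def _sketch_sdpa(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
    # q, k, v shaped (batch, heads, seq_len, head_dim); returns a tensor of the same shape as q
    return F.scaled_dot_product_attention(q, k, v)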
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n 
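# A hypothetical invocation of this trainer (not taken from the repository docs), assuming the
# script is saved as fine_tune.py and launched through Accelerate. Flag names mirror the args.*
# attributes used in the code; flags whose argparse definitions are not shown above are assumed
# to follow the same naming. Paths and numbers are placeholders only:
#
#   accelerate launch fine_tune.py \
#     --pretrained_model_name_or_path model.ckpt \
#     --in_json meta_lat.json \
#     --train_data_dir train_images \
#     --output_dir output \
#     --train_batch_size 1 --learning_rate 5e-6 --max_train_steps 1600 \
#     --mixed_precision fp16 --use_8bit_adam --xformers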
tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = 
{\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n 
all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = 
self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n 
max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: 
{args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any 
children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / 
full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, 
tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = 
{\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n 
all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n 
self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == 
self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = 
CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, 
\"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n 
text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, 
return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 
0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n 
mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n                if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n                    causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n                                             device=device).triu(q_start_index - k_start_index + 1)\n                    attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n                exp_attn_weights = torch.exp(attn_weights - mc)\n\n                if exists(row_mask):\n                    exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n                p = exp_attn_weights / lc\n\n                dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n                dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n                D = (doc * oc).sum(dim=-1, keepdims=True)\n                ds = p * scale * (dp - D)\n\n                dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n                dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n    parser.add_argument(\"--keep_tokens\", type=int, default=None,\n                        help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n    parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n    parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")
self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption 
= \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with 
open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # 
U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n 
assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = 
batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= 
args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff 
= max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale

                if causal and q_start_index < (k_start_index + k_bucket_size - 1):
                    causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,
                                             device=device).triu(q_start_index - k_start_index + 1)
                    attn_weights.masked_fill_(causal_mask, max_neg_value)

                exp_attn_weights = torch.exp(attn_weights - mc)

                if exists(row_mask):
                    exp_attn_weights.masked_fill_(~row_mask, 0.)

                p = exp_attn_weights / lc

                dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
                dp = einsum('... i d, ... j d -> ... i j', doc, vc)

                D = (doc * oc).sum(dim=-1, keepdims=True)
                ds = p * scale * (dp - D)

                dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
                dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)

                dqc.add_(dq_chunk)
                dkc.add_(dk_chunk)
                dvc.add_(dv_chunk)

        return dq, dk, dv, None, None, None, None


def replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):
    if mem_eff_attn:
        replace_unet_cross_attn_to_memory_efficient()
    elif xformers:
        replace_unet_cross_attn_to_xformers()
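# A minimal, hypothetical usage sketch of the FlashAttentionFunction defined above, assuming
# q/k/v are shaped (batch, heads, seq_len, head_dim) as produced by the rearrange in
# forward_flash_attn; mask=None and causal=False mirror that call site, and the bucket sizes
# 512/1024 match the defaults used in replace_unet_cross_attn_to_memory_efficient. The
# tensor shapes below are illustrative only:
#
#   q = torch.randn(2, 8, 4096, 40)
#   k = torch.randn(2, 8, 4096, 40)
#   v = torch.randn(2, 8, 4096, 40)
#   out = FlashAttentionFunction.apply(q, k, v, None, False, 512, 1024)
#   assert out.shape == q.shape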
\"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert 
caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. 
/ 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", 
args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = 
accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: 
{args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any 
children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / 
full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, 
tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = 
{\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n 
all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = 
set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids 
= input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, 
subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if 
hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n 
text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, 
return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 
0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n 
mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... 
i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_fp16\", action=\"store_true\", help=\"fp16 training including gradients / 勾配も含めてfp16で学習する\")\n parser.add_argument(\"--clip_skip\", type=int, default=None,\n help=\"use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)\")\n parser.add_argument(\"--debug_dataset\", action=\"store_true\",\n help=\"show images for debugging (do not train) / デバッグ用に学習データを画面表示する(学習は行わない)\")\n parser.add_argument(\"--logging_dir\", type=str, default=None,\n help=\"enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する\")\n parser.add_argument(\"--log_prefix\", type=str, default=None, help=\"add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n    parser.add_argument(\"--keep_tokens\", type=int, default=None,\n                        help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n    parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n    parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n    parser.add_argument(\"--output_dir\", type=str, default=None,\n                        help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n    parser.add_argument(\"--save_precision\", type=str, default=None,\n                        choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n    parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n                        help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n    parser.add_argument(\"--use_safetensors\", action='store_true',\n                        help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n    parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n    parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n                        help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n    parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n                        help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler 
options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) / optimizerなど学習状態も含めたstateを追加で保存する\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
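# A minimal standalone sketch of the long-caption handling used by FineTuningDataset in
# this chunk: captions longer than CLIP's 75-token window are split into 75-token pieces
# and <BOS>/<EOS> are re-attached around each piece, giving a (3, 77) input_ids tensor.
# The tokenizer name matches the script's TOKENIZER_PATH; max_token_length = 225 is just
# an example value (as with --max_token_length=225), not something the script requires.
import torch
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
max_token_length = 225                                    # example: 3 * 75 caption tokens
tokenizer_max_length = max_token_length + 2               # room for <BOS> and <EOS>

ids = tokenizer("a very long, comma separated caption ...", padding="max_length",
                truncation=True, max_length=tokenizer_max_length,
                return_tensors="pt").input_ids.squeeze(0)

chunks = []
for i in range(1, tokenizer_max_length - tokenizer.model_max_length + 2,
               tokenizer.model_max_length - 2):           # i.e. range(1, 152, 75)
    chunk = torch.cat((ids[0].unsqueeze(0),                             # <BOS>
                       ids[i:i + tokenizer.model_max_length - 2],       # caption piece
                       ids[-1].unsqueeze(0)))                           # <EOS> or <PAD>
    chunks.append(chunk)

input_ids = torch.stack(chunks)                           # shape (3, 77), as in __getitem__
print(input_ids.shape)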
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 
0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not 
os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n 
self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n 
keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if 
args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, 
valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, 
num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = 
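# --- Editor's note: illustrative sketch, not part of the original script. ---
# The epoch count above is derived from the dataloader length and the gradient
# accumulation setting. A quick worked example with hypothetical numbers
# (1000 batches per epoch, accumulation of 4, max_train_steps=1600):
import math

len_train_dataloader = 1000          # hypothetical batches per epoch
gradient_accumulation_steps = 4      # hypothetical setting
max_train_steps = 1600               # default of --max_train_steps

num_update_steps_per_epoch = math.ceil(len_train_dataloader / gradient_accumulation_steps)
num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)
print(num_update_steps_per_epoch, num_train_epochs)  # -> 250 7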
latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = 
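# --- Editor's note: minimal self-contained sketch of the training step above. ---
# Not part of the original script; the scheduler settings mirror the code in this
# file, while the latents and the "U-Net prediction" are random stand-ins.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                          num_train_timesteps=1000, clip_sample=False)

latents = torch.randn(2, 4, 64, 64) * 0.18215   # VAE latents scaled by 0.18215 as above
noise = torch.randn_like(latents)
timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (latents.shape[0],)).long()
noisy_latents = scheduler.add_noise(latents, noise, timesteps)

# in the real loop this comes from unet(noisy_latents, timesteps, encoder_hidden_states).sample
noise_pred = torch.randn_like(noisy_latents)

# epsilon objective (default) vs. v-parameterization objective
target_eps = noise
target_v = scheduler.get_velocity(latents, noise, timesteps)

loss = torch.nn.functional.mse_loss(noise_pred.float(), target_eps.float(), reduction="mean")
print(loss.item())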
loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, 
k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... 
i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = 
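# --- Editor's note: illustrative usage sketch, not part of the original script. ---
# Shows the tensor layout the FlashAttentionFunction defined above expects
# (batch, heads, tokens, dim_head); all sizes below are hypothetical.
import torch
from einops import rearrange

b, n, heads, dim_head = 2, 1024, 8, 40
q = torch.randn(b, n, heads * dim_head)
k = torch.randn(b, n, heads * dim_head)
v = torch.randn(b, n, heads * dim_head)

# split the fused head dimension, as forward_flash_attn does before calling apply()
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=heads), (q, k, v))

# mask=None, causal=False, q_bucket_size=512, k_bucket_size=1024 (the values used above)
out = FlashAttentionFunction.apply(q, k, v, None, False, 512, 1024)
out = rearrange(out, 'b h n d -> b n (h d)')
print(out.shape)  # torch.Size([2, 1024, 320])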
argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_fp16\", action=\"store_true\", help=\"fp16 training including gradients / 勾配も含めてfp16で学習する\")\n parser.add_argument(\"--clip_skip\", type=int, default=None,\n help=\"use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)\")\n parser.add_argument(\"--debug_dataset\", action=\"store_true\",\n help=\"show images for debugging (do not train) / デバッグ用に学習データを画面表示する(学習は行わない)\")\n parser.add_argument(\"--logging_dir\", type=str, default=None,\n help=\"enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する\")\n parser.add_argument(\"--log_prefix\", type=str, default=None, help=\"add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列\")\n parser.add_argument(\"--lr_scheduler\", type=str, default=\"constant\",\n help=\"scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup\")", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add 
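# --- Editor's note: hypothetical invocation, not from the source. ---
# The script file name (fine_tune.py) and all concrete paths/values below are
# assumptions for illustration; the flags themselves are the ones defined above.
#
#   accelerate launch fine_tune.py \
#     --pretrained_model_name_or_path=model.ckpt \
#     --in_json=meta_lat.json --train_data_dir=train_images \
#     --output_dir=output --train_batch_size=1 --learning_rate=2e-6 \
#     --max_train_steps=1600 --mixed_precision=fp16 --use_8bit_adam --xformers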
keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += 
n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'", "type": "infile" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, 
shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = 
caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
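# --- Editor's note: worked example of the chunking arithmetic above, not part of the original script. ---
# With a hypothetical --max_token_length=225, the caption is tokenized to 225 + 2 = 227 ids
# and split into three 75-token chunks, each re-wrapped with its own <BOS>/<EOS> to length 77.
model_max_length = 77                         # CLIP tokenizer limit
max_token_length = 225                        # hypothetical --max_token_length value
tokenizer_max_length = max_token_length + 2   # 227

chunk_starts = list(range(1, tokenizer_max_length - model_max_length + 2, model_max_length - 2))
print(chunk_starts)       # [1, 76, 151] -> three chunks, matching the "(1, 152, 75)" comment above
print(len(chunk_starts))  # 3 -> input_ids is stacked to shape (3, 77)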
に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. 
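# --- Editor's note: sketch of the bucket bookkeeping done by FineTuningDataset above. ---
# Not part of the original script; the metadata dict is fabricated for illustration.
import math

metadata = {
    "img_a": {"train_resolution": [512, 512]},
    "img_b": {"train_resolution": [512, 512]},
    "img_c": {"train_resolution": [640, 448]},
}
bucket_resos = sorted({tuple(md["train_resolution"]) for md in metadata.values()})
reso_to_index = {reso: i for i, reso in enumerate(bucket_resos)}

buckets = [[] for _ in bucket_resos]
for key, md in metadata.items():
    buckets[reso_to_index[tuple(md["train_resolution"])]].append(key)

batch_size = 2
buckets_indices = [(bi, batch_i)
                   for bi, bucket in enumerate(buckets)
                   for batch_i in range(math.ceil(len(bucket) / batch_size))]
print(bucket_resos)      # [(512, 512), (640, 448)]
print(buckets_indices)   # [(0, 0), (1, 0)] -> one batch index per resolution bucket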
/ 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", 
args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = 
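# --- Editor's note: minimal sketch of the batch_size=1 / collate_fn pattern used above. ---
# Not part of the original script. The dataset already returns a complete bucketed batch
# per __getitem__, so the DataLoader runs with batch_size=1 and collate_fn simply unwraps
# the single-element list it receives (the same trick as collate_fn in this script).
import torch

class PrebatchedDataset(torch.utils.data.Dataset):
    def __init__(self, batches):
        self.batches = batches          # each element is already a complete batch dict
    def __len__(self):
        return len(self.batches)
    def __getitem__(self, index):
        return self.batches[index]

def unwrap_collate(examples):
    return examples[0]

batches = [{"latents": torch.zeros(4, 4, 64, 64)} for _ in range(3)]
loader = torch.utils.data.DataLoader(PrebatchedDataset(batches), batch_size=1,
                                     shuffle=False, collate_fn=unwrap_collate)
for batch in loader:
    print(batch["latents"].shape)       # torch.Size([4, 4, 64, 64]) -> no extra leading dim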
accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
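# --- Editor's note: shape-level sketch of the v1 hidden-state re-merge above. ---
# Not part of the original script; the encoder output is faked with random numbers
# and the sizes assume a hypothetical --max_token_length=225.
import torch

b_size, model_max_length, max_token_length, dim = 2, 77, 225, 768
# after reshaping, the text encoder was run on three 77-token chunks per sample
encoder_hidden_states = torch.randn(b_size, 3 * model_max_length, dim)

states_list = [encoder_hidden_states[:, 0].unsqueeze(1)]                      # single <BOS>
for i in range(1, max_token_length, model_max_length):                        # i = 1, 78, 155
    states_list.append(encoder_hidden_states[:, i:i + model_max_length - 2])  # 75 tokens per chunk
states_list.append(encoder_hidden_states[:, -1].unsqueeze(1))                 # single <EOS>/<PAD>
merged = torch.cat(states_list, dim=1)
print(merged.shape)  # torch.Size([2, 227, 768]) == (b_size, max_token_length + 2, dim)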
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)", "type": "inproject" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")", "type": "inproject" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):", "type": "common" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_xformers\n# endregion\n\n\nif __name__ == '__main__':\n # torch.cuda.set_per_process_memory_fraction(0.48)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--v2\", action='store_true',\n help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む')\n parser.add_argument(\"--v_parameterization\", action='store_true',\n help='enable v-parameterization training / v-parameterization学習を有効にする')\n parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,\n help=\"pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル\")\n parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")\n parser.add_argument(\"--shuffle_caption\", action=\"store_true\",\n help=\"shuffle comma-separated 
caption when fine tuning / fine tuning時にコンマで区切られたcaptionの各要素をshuffleする\")\n parser.add_argument(\"--keep_tokens\", type=int, default=None,\n help=\"keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す\")\n parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")\n parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")\n parser.add_argument(\"--output_dir\", type=str, default=None,\n help=\"directory to output trained model, save as same format as input / 学習後のモデル出力先ディレクトリ(入力と同じ形式で保存)\")\n parser.add_argument(\"--save_precision\", type=str, default=None,\n choices=[None, \"float\", \"fp16\", \"bf16\"], help=\"precision in saving (available in StableDiffusion checkpoint) / 保存時に精度を変更して保存する(StableDiffusion形式での保存時のみ有効)\")\n parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],\n help=\"format to save the model (default is same to original) / モデル保存時の形式(未指定時は元モデルと同じ)\")\n parser.add_argument(\"--use_safetensors\", action='store_true',\n help=\"use safetensors format to save (if save_model_as is not specified) / checkpoint、モデルをsafetensors形式で保存する(save_model_as未指定時)\")\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"train text encoder / text encoderも学習する\")\n parser.add_argument(\"--hypernetwork_module\", type=str, default=None,\n help='train hypernetwork instead of fine tuning, module to use / fine tuningの代わりにHypernetworkの学習をする場合、そのモジュール')\n parser.add_argument(\"--hypernetwork_weights\", type=str, default=None,\n help='hypernetwork weights to initialize for additional training / Hypernetworkの学習時に読み込む重み(Hypernetworkの追加学習)')\n parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,\n help=\"save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する\")\n parser.add_argument(\"--save_state\", action=\"store_true\",\n help=\"save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"saved state to resume training / 学習再開するモデルのstate\")\n parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],\n help=\"max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)\")\n parser.add_argument(\"--train_batch_size\", type=int, default=1,\n help=\"batch size for training / 学習時のバッチサイズ\")\n parser.add_argument(\"--use_8bit_adam\", action=\"store_true\",\n help=\"use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)\")\n parser.add_argument(\"--mem_eff_attn\", action=\"store_true\",\n help=\"use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う\")\n parser.add_argument(\"--xformers\", action=\"store_true\",\n help=\"use xformers for CrossAttention / CrossAttentionにxformersを使う\")\n parser.add_argument(\"--diffusers_xformers\", action='store_true',\n help='use xformers by diffusers (Hypernetworks doesn\\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)')\n parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")\n parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")\n parser.add_argument(\"--gradient_checkpointing\", action=\"store_true\",\n help=\"enable gradient checkpointing / grandient checkpointingを有効にする\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数\")\n parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",\n choices=[\"no\", \"fp16\", \"bf16\"], help=\"use mixed precision / 混合精度を使う場合、その精度\")\n parser.add_argument(\"--full_fp16\", action=\"store_true\", help=\"fp16 training including gradients / 勾配も含めてfp16で学習する\")\n parser.add_argument(\"--clip_skip\", type=int, default=None,\n help=\"use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)\")\n parser.add_argument(\"--debug_dataset\", action=\"store_true\",\n help=\"show images for debugging (do not train) / デバッグ用に学習データを画面表示する(学習は行わない)\")\n parser.add_argument(\"--logging_dir\", type=str, default=None,\n help=\"enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する\")\n parser.add_argument(\"--log_prefix\", type=str, default=None, help=\"add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列\")\n parser.add_argument(\"--lr_scheduler\", type=str, default=\"constant\",\n help=\"scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup\")\n parser.add_argument(\"--lr_warmup_steps\", type=int, default=0,\n help=\"Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)\")\n\n args = parser.parse_args()", "type": "commited" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: 
model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:", "type": "commited" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:", "type": "commited" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)\n dv = torch.zeros_like(v)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n do.split(q_bucket_size, dim=-2),\n mask,\n l.split(q_bucket_size, dim=-2),\n m.split(q_bucket_size, dim=-2),\n dq.split(q_bucket_size, dim=-2)\n )\n\n for ind, (qc, oc, doc, row_mask, lc, mc, dqc) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n dk.split(k_bucket_size, dim=-2),\n dv.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n exp_attn_weights = torch.exp(attn_weights - mc)\n\n if exists(row_mask):\n exp_attn_weights.masked_fill_(~row_mask, 0.)\n\n p = exp_attn_weights / lc\n\n dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)\n dp = einsum('... i d, ... j d -> ... i j', doc, vc)\n\n D = (doc * oc).sum(dim=-1, keepdims=True)\n ds = p * scale * (dp - D)\n\n dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)\n dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc)\n\n dqc.add_(dq_chunk)\n dkc.add_(dk_chunk)\n dvc.add_(dv_chunk)\n\n return dq, dk, dv, None, None, None, None\n\n\ndef replace_unet_modules(unet: diffusers.models.unet_2d_condition.UNet2DConditionModel, mem_eff_attn, xformers):\n if mem_eff_attn:\n replace_unet_cross_attn_to_memory_efficient()\n elif xformers:\n replace_unet_cross_attn_to_xformers()\n\n\ndef replace_unet_cross_attn_to_memory_efficient():\n print(\"Replace CrossAttention.forward to use FlashAttention (not xformers)\")\n flash_func = FlashAttentionFunction\n\n def forward_flash_attn(self, x, context=None, mask=None):\n q_bucket_size = 512\n k_bucket_size = 1024\n\n h = self.heads\n q = self.to_q(x)\n\n context = context if context is not None else x\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k = self.to_k(context_k)\n v = self.to_v(context_v)\n del context, x\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))\n\n out = flash_func.apply(q, k, v, mask, False, q_bucket_size, k_bucket_size)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n\n # diffusers 0.7.0~ わざわざ変えるなよ (;´Д`)\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n\n diffusers.models.attention.CrossAttention.forward = forward_flash_attn\n\n\ndef replace_unet_cross_attn_to_xformers():\n print(\"Replace CrossAttention.forward to use xformers\")\n try:\n import xformers.ops\n except ImportError:\n raise ImportError(\"No xformers / xformersがインストールされていないようです\")\n\n def forward_xformers(self, x, context=None, mask=None):\n h = self.heads\n q_in = self.to_q(x)\n\n context = default(context, x)\n context = context.to(x.dtype)\n\n if hasattr(self, 'hypernetwork') and self.hypernetwork is not None:\n context_k, context_v = self.hypernetwork.forward(x, context)\n context_k = context_k.to(x.dtype)\n context_v = context_v.to(x.dtype)\n else:\n context_k = context\n context_v = context\n\n k_in = self.to_k(context_k)\n v_in = self.to_v(context_v)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))\n del q_in, k_in, v_in\n\n q = q.contiguous()\n k = k.contiguous()\n v = v.contiguous()\n out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) # 最適なのを選んでくれる\n\n out = rearrange(out, 'b n h d -> b n (h d)', h=h)\n\n # diffusers 0.7.0~\n out = self.to_out[0](out)\n out = self.to_out[1](out)\n return out\n", "type": "commited" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")", "type": "non_informative" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n", "type": "non_informative" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader 
(shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = 
i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... 
<EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n 
print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child 
in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n 
unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = 
text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, 
exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch + 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 
n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)", "type": "non_informative" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. @kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = 
\"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert 
caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... <EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. 
/ 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", 
args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = 
accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:", "type": "random" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない", "type": "random" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())", "type": "random" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n fn_recursive_set_mem_eff(model)\n\n # モデルに xformers とか memory efficient attention を組み込む\n if args.diffusers_xformers:\n print(\"Use xformers by Diffusers\")\n set_diffusers_xformers_flag(unet, True)\n else:\n # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある\n print(\"Disable Diffusers' xformers\")\n set_diffusers_xformers_flag(unet, False)\n replace_unet_modules(unet, args.mem_eff_attn, 
args.xformers)\n\n if not fine_tuning:\n # Hypernetwork\n print(\"import hypernetwork module:\", args.hypernetwork_module)\n hyp_module = importlib.import_module(args.hypernetwork_module)\n\n hypernetwork = hyp_module.Hypernetwork()\n\n if args.hypernetwork_weights is not None:\n print(\"load hypernetwork weights from:\", args.hypernetwork_weights)\n hyp_sd = torch.load(args.hypernetwork_weights, map_location='cpu')\n success = hypernetwork.load_from_state_dict(hyp_sd)\n assert success, \"hypernetwork weights loading failed.\"\n\n print(\"apply hypernetwork\")\n hypernetwork.apply_to_diffusers(None, text_encoder, unet)\n\n # 学習を準備する:モデルを適切な状態にする\n training_models = []\n if fine_tuning:\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n training_models.append(unet)\n\n if args.train_text_encoder:\n print(\"enable text encoder training\")\n if args.gradient_checkpointing:\n text_encoder.gradient_checkpointing_enable()\n training_models.append(text_encoder)\n else:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False) # text encoderは学習しない\n text_encoder.eval()\n else:\n unet.to(accelerator.device) # , dtype=weight_dtype) # dtypeを指定すると学習できない\n unet.requires_grad_(False)\n unet.eval()\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n text_encoder.requires_grad_(False)\n text_encoder.eval()\n training_models.append(hypernetwork)\n\n for m in training_models:\n m.requires_grad_(True)\n params = []\n for m in training_models:\n params.extend(m.parameters())\n params_to_optimize = params\n\n # 学習に必要なクラスを準備する\n print(\"prepare optimizer, data loader etc.\")\n\n # 8-bit Adamを使う\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\"No bitsand bytes / bitsandbytesがインストールされていないようです\")\n print(\"use 8-bit Adam optimizer\")\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略\n optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate)\n\n # dataloaderを準備する\n # DataLoaderのプロセス数:0はメインプロセスになる\n n_workers = min(8, os.cpu_count() - 1) # cpu_count-1 ただし最大8\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn, num_workers=n_workers)\n\n # lr schedulerを用意する\n lr_scheduler = diffusers.optimization.get_scheduler(\n args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps)\n\n # acceleratorがなんかよろしくやってくれるらしい\n if args.full_fp16:\n assert args.mixed_precision == \"fp16\", \"full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。\"\n print(\"enable full fp16 training.\")\n\n if fine_tuning:\n # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする\n if args.full_fp16:\n unet.to(weight_dtype)\n text_encoder.to(weight_dtype)\n\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler)\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)\n else:\n if args.full_fp16:\n unet.to(weight_dtype)\n hypernetwork.to(weight_dtype)\n\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, hypernetwork, optimizer, train_dataloader, lr_scheduler)\n\n # 実験的機能:勾配も含めたfp16学習を行う 
PyTorchにパッチを当ててfp16でのgrad scaleを有効にする\n if args.full_fp16:\n org_unscale_grads = accelerator.scaler._unscale_grads_\n\n def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16):\n return org_unscale_grads(optimizer, inv_scale, found_inf, True)\n\n accelerator.scaler._unscale_grads_ = _unscale_grads_replacer\n\n # TODO accelerateのconfigに指定した型とオプション指定の型とをチェックして異なれば警告を出す\n\n # resumeする\n if args.resume is not None:\n print(f\"resume training from state: {args.resume}\")\n accelerator.load_state(args.resume)\n\n # epoch数を計算する\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # 学習する\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n print(\"running training / 学習開始\")\n print(f\" num examples / サンプル数: {train_dataset.images_count}\")\n print(f\" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}\")\n print(f\" num epochs / epoch数: {num_train_epochs}\")\n print(f\" batch size per device / バッチサイズ: {args.train_batch_size}\")\n print(f\" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}\")\n print(f\" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}\")\n print(f\" total optimization steps / 学習ステップ数: {args.max_train_steps}\")\n\n progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc=\"steps\")\n global_step = 0\n\n # v4で更新:clip_sample=Falseに\n # Diffusersのtrain_dreambooth.pyがconfigから持ってくるように変更されたので、clip_sample=Falseになるため、それに合わせる\n # 既存の1.4/1.5/2.0/2.1はすべてschdulerのconfigは(クラス名を除いて)同じ\n # よくソースを見たら学習時はclip_sampleは関係ないや(;'∀')\n noise_scheduler = DDPMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000, clip_sample=False)\n\n if accelerator.is_main_process:\n accelerator.init_trackers(\"finetuning\" if fine_tuning else \"hypernetwork\")\n\n # 以下 train_dreambooth.py からほぼコピペ\n for epoch in range(num_train_epochs):\n print(f\"epoch {epoch+1}/{num_train_epochs}\")\n for m in training_models:\n m.train()\n\n loss_total = 0\n for step, batch in enumerate(train_dataloader):\n with accelerator.accumulate(training_models[0]): # 複数モデルに対応していない模様だがとりあえずこうしておく\n latents = batch[\"latents\"].to(accelerator.device)\n latents = latents * 0.18215\n b_size = latents.shape[0]\n\n # with torch.no_grad():\n with torch.set_grad_enabled(args.train_text_encoder):\n # Get the text embedding for conditioning\n input_ids = batch[\"input_ids\"].to(accelerator.device)\n input_ids = input_ids.reshape((-1, tokenizer.model_max_length)) # batch_size*3, 77\n\n if args.clip_skip is None:\n encoder_hidden_states = text_encoder(input_ids)[0]\n else:\n enc_out = text_encoder(input_ids, output_hidden_states=True, return_dict=True)\n encoder_hidden_states = enc_out['hidden_states'][-args.clip_skip]\n encoder_hidden_states = text_encoder.text_model.final_layer_norm(encoder_hidden_states)\n\n # bs*3, 77, 768 or 1024\n encoder_hidden_states = encoder_hidden_states.reshape((b_size, -1, encoder_hidden_states.shape[-1]))\n\n if args.max_token_length is not None:\n if args.v2:\n # v2: <BOS>...<EOS> <PAD> ... の三連を <BOS>...<EOS> <PAD> ... 
へ戻す 正直この実装でいいのかわからん\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n chunk = encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2] # <BOS> の後から 最後の前まで\n if i > 0:\n for j in range(len(chunk)):\n if input_ids[j, 1] == tokenizer.eos_token: # 空、つまり <BOS> <EOS> <PAD> ...のパターン\n chunk[j, 0] = chunk[j, 1] # 次の <PAD> の値をコピーする\n states_list.append(chunk) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS> か <PAD> のどちらか\n encoder_hidden_states = torch.cat(states_list, dim=1)\n else:\n # v1: <BOS>...<EOS> の三連を <BOS>...<EOS> へ戻す\n states_list = [encoder_hidden_states[:, 0].unsqueeze(1)] # <BOS>\n for i in range(1, args.max_token_length, tokenizer.model_max_length):\n states_list.append(encoder_hidden_states[:, i:i + tokenizer.model_max_length - 2]) # <BOS> の後から <EOS> の前まで\n states_list.append(encoder_hidden_states[:, -1].unsqueeze(1)) # <EOS>\n encoder_hidden_states = torch.cat(states_list, dim=1)\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents, device=latents.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Predict the noise residual\n noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n if args.v_parameterization:\n # v-parameterization training\n # Diffusers 0.10.0からv_parameterizationの学習に対応したのでそちらを使う\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n target = noise\n\n loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = []\n for m in training_models:\n params_to_clip.extend(m.parameters())\n accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm)\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad(set_to_none=True)\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n global_step += 1\n\n current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず\n if args.logging_dir is not None:\n logs = {\"loss\": current_loss, \"lr\": lr_scheduler.get_last_lr()[0]}\n accelerator.log(logs, step=global_step)\n\n loss_total += current_loss\n avr_loss = loss_total / (step+1)\n logs = {\"loss\": avr_loss} # , \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n\n if global_step >= args.max_train_steps:\n break\n\n if args.logging_dir is not None:\n logs = {\"epoch_loss\": loss_total / len(train_dataloader)}\n accelerator.log(logs, step=epoch+1)\n\n accelerator.wait_for_everyone()\n\n if args.save_every_n_epochs is not None:\n if (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs:\n print(\"saving checkpoint.\")\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_epoch_ckpt_name(use_safetensors, epoch + 1))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, unwrap_model(text_encoder), unwrap_model(unet),\n src_stable_diffusion_ckpt, epoch 
+ 1, global_step, save_dtype, vae)\n else:\n out_dir = os.path.join(args.output_dir, EPOCH_DIFFUSERS_DIR_NAME.format(epoch + 1))\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, unwrap_model(text_encoder), unwrap_model(unet),\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))\n\n if args.save_state:\n print(\"saving state.\")\n accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(epoch + 1)))\n\n is_main_process = accelerator.is_main_process\n if is_main_process:\n if fine_tuning:\n unet = unwrap_model(unet)\n text_encoder = unwrap_model(text_encoder)\n else:\n hypernetwork = unwrap_model(hypernetwork)\n\n accelerator.end_training()\n\n if args.save_state:\n print(\"saving last state.\")\n accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME))\n\n del accelerator # この後メモリを使うのでこれは消す\n\n if is_main_process:\n os.makedirs(args.output_dir, exist_ok=True)\n ckpt_file = os.path.join(args.output_dir, model_util.get_last_ckpt_name(use_safetensors))\n\n if fine_tuning:\n if save_stable_diffusion_format:\n print(f\"save trained model as StableDiffusion checkpoint to {ckpt_file}\")\n model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,\n src_stable_diffusion_ckpt, epoch, global_step, save_dtype, vae)\n else:\n # Create the pipeline using using the trained modules and save it.\n print(f\"save trained model as Diffusers to {args.output_dir}\")\n out_dir = os.path.join(args.output_dir, LAST_DIFFUSERS_DIR_NAME)\n os.makedirs(out_dir, exist_ok=True)\n model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,\n src_diffusers_model_path, vae=vae, use_safetensors=use_safetensors)\n else:\n print(f\"save trained model to {ckpt_file}\")\n save_hypernetwork(ckpt_file, hypernetwork)\n\n print(\"model saved.\")\n\n\n# region モジュール入れ替え部\n\"\"\"\n高速化のためのモジュール入れ替え\n\"\"\"\n\n# FlashAttentionを使うCrossAttention\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# LICENSE MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\n# constants\n\nEPSILON = 1e-6\n\n# helper functions\n\n\ndef exists(val):\n return val is not None\n\n\ndef default(val, d):\n return val if exists(val) else d\n\n# flash attention forwards and backwards\n\n# https://arxiv.org/abs/2205.14135\n\n\nclass FlashAttentionFunction(torch.autograd.function.Function):\n @ staticmethod\n @ torch.no_grad()\n def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):\n \"\"\" Algorithm 2 in the paper \"\"\"\n\n device = q.device\n dtype = q.dtype\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n o = torch.zeros_like(q)\n all_row_sums = torch.zeros((*q.shape[:-1], 1), dtype=dtype, device=device)\n all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, dtype=dtype, device=device)\n\n scale = (q.shape[-1] ** -0.5)\n\n if not exists(mask):\n mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)\n else:\n mask = rearrange(mask, 'b n -> b 1 1 n')\n mask = mask.split(q_bucket_size, dim=-1)\n\n row_splits = zip(\n q.split(q_bucket_size, dim=-2),\n o.split(q_bucket_size, dim=-2),\n mask,\n all_row_sums.split(q_bucket_size, dim=-2),\n all_row_maxes.split(q_bucket_size, dim=-2),\n )\n\n for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):\n 
q_start_index = ind * q_bucket_size - qk_len_diff\n\n col_splits = zip(\n k.split(k_bucket_size, dim=-2),\n v.split(k_bucket_size, dim=-2),\n )\n\n for k_ind, (kc, vc) in enumerate(col_splits):\n k_start_index = k_ind * k_bucket_size\n\n attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale\n\n if exists(row_mask):\n attn_weights.masked_fill_(~row_mask, max_neg_value)\n\n if causal and q_start_index < (k_start_index + k_bucket_size - 1):\n causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype=torch.bool,\n device=device).triu(q_start_index - k_start_index + 1)\n attn_weights.masked_fill_(causal_mask, max_neg_value)\n\n block_row_maxes = attn_weights.amax(dim=-1, keepdims=True)\n attn_weights -= block_row_maxes\n exp_weights = torch.exp(attn_weights)\n\n if exists(row_mask):\n exp_weights.masked_fill_(~row_mask, 0.)\n\n block_row_sums = exp_weights.sum(dim=-1, keepdims=True).clamp(min=EPSILON)\n\n new_row_maxes = torch.maximum(block_row_maxes, row_maxes)\n\n exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)\n\n exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)\n exp_block_row_max_diff = torch.exp(block_row_maxes - new_row_maxes)\n\n new_row_sums = exp_row_max_diff * row_sums + exp_block_row_max_diff * block_row_sums\n\n oc.mul_((row_sums / new_row_sums) * exp_row_max_diff).add_((exp_block_row_max_diff / new_row_sums) * exp_values)\n\n row_maxes.copy_(new_row_maxes)\n row_sums.copy_(new_row_sums)\n\n ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)\n ctx.save_for_backward(q, k, v, o, all_row_sums, all_row_maxes)\n\n return o\n\n @ staticmethod\n @ torch.no_grad()\n def backward(ctx, do):\n \"\"\" Algorithm 4 in the paper \"\"\"\n\n causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args\n q, k, v, o, l, m = ctx.saved_tensors\n\n device = q.device\n\n max_neg_value = -torch.finfo(q.dtype).max\n qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)\n\n dq = torch.zeros_like(q)\n dk = torch.zeros_like(k)", "type": "random" }, { "content": "# v2: select precision for saved checkpoint\n# v3: add logging for tensorboard, fix to shuffle=False in DataLoader (shuffling is in dataset)\n# v4: support SD2.0, add lr scheduler options, supports save_every_n_epochs and save_state for DiffUsers model\n# v5: refactor to use model_util, support safetensors, add settings to use Diffusers' xformers, add log prefix\n# v6: model_util update\n# v7: support Diffusers 0.10.0 (v-parameterization training, safetensors in Diffusers) and accelerate 0.15.0, support full path in metadata\n# v8: experimental full fp16 training.\n# v9: add keep_tokens and save_model_as option, flip augmentation\n\n# このスクリプトのライセンスは、train_dreambooth.pyと同じくApache License 2.0とします\n# License:\n# Copyright 2022 Kohya S. 
@kohya_ss\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# License of included scripts:\n\n# Diffusers: ASL 2.0 https://github.com/huggingface/diffusers/blob/main/LICENSE\n\n# Memory efficient attention:\n# based on https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/memory_efficient_attention_pytorch/flash_attention.py\n# MIT https://github.com/lucidrains/memory-efficient-attention-pytorch/blob/main/LICENSE\n\nimport argparse\nimport math\nimport os\nimport random\nimport json\nimport importlib\nimport time\n\nfrom tqdm import tqdm\nimport torch\nfrom accelerate import Accelerator\nfrom accelerate.utils import set_seed\nfrom transformers import CLIPTokenizer\nimport diffusers\nfrom diffusers import DDPMScheduler, StableDiffusionPipeline\nimport numpy as np\nfrom einops import rearrange\nfrom torch import einsum\n\nimport library.model_util as model_util\n\n# Tokenizer: checkpointから読み込むのではなくあらかじめ提供されているものを使う\nTOKENIZER_PATH = \"openai/clip-vit-large-patch14\"\nV2_STABLE_DIFFUSION_PATH = \"stabilityai/stable-diffusion-2\" # ここからtokenizerだけ使う v2とv2.1はtokenizer仕様は同じ\n\n# checkpointファイル名\nEPOCH_STATE_NAME = \"epoch-{:06d}-state\"\nLAST_STATE_NAME = \"last-state\"\n\nLAST_DIFFUSERS_DIR_NAME = \"last\"\nEPOCH_DIFFUSERS_DIR_NAME = \"epoch-{:06d}\"\n\n\ndef collate_fn(examples):\n return examples[0]\n\n\nclass FineTuningDataset(torch.utils.data.Dataset):\n def __init__(self, metadata, train_data_dir, batch_size, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, dataset_repeats, debug) -> None:\n super().__init__()\n\n self.metadata = metadata\n self.train_data_dir = train_data_dir\n self.batch_size = batch_size\n self.tokenizer: CLIPTokenizer = tokenizer\n self.max_token_length = max_token_length\n self.shuffle_caption = shuffle_caption\n self.shuffle_keep_tokens = shuffle_keep_tokens\n self.debug = debug\n\n self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2\n\n print(\"make buckets\")\n\n # 最初に数を数える\n self.bucket_resos = set()\n for img_md in metadata.values():\n if 'train_resolution' in img_md:\n self.bucket_resos.add(tuple(img_md['train_resolution']))\n self.bucket_resos = list(self.bucket_resos)\n self.bucket_resos.sort()\n print(f\"number of buckets: {len(self.bucket_resos)}\")\n\n reso_to_index = {}\n for i, reso in enumerate(self.bucket_resos):\n reso_to_index[reso] = i\n\n # bucketに割り当てていく\n self.buckets = [[] for _ in range(len(self.bucket_resos))]\n n = 1 if dataset_repeats is None else dataset_repeats\n images_count = 0\n for image_key, img_md in metadata.items():\n if 'train_resolution' not in img_md:\n continue\n if not os.path.exists(self.image_key_to_npz_file(image_key)):\n continue\n\n reso = tuple(img_md['train_resolution'])\n for _ in range(n):\n self.buckets[reso_to_index[reso]].append(image_key)\n images_count += n\n\n # 参照用indexを作る\n self.buckets_indices = []\n for bucket_index, bucket in enumerate(self.buckets):\n batch_count = int(math.ceil(len(bucket) / 
self.batch_size))\n for batch_index in range(batch_count):\n self.buckets_indices.append((bucket_index, batch_index))\n\n self.shuffle_buckets()\n self._length = len(self.buckets_indices)\n self.images_count = images_count\n\n def show_buckets(self):\n for i, (reso, bucket) in enumerate(zip(self.bucket_resos, self.buckets)):\n print(f\"bucket {i}: resolution {reso}, count: {len(bucket)}\")\n\n def shuffle_buckets(self):\n random.shuffle(self.buckets_indices)\n for bucket in self.buckets:\n random.shuffle(bucket)\n\n def image_key_to_npz_file(self, image_key):\n npz_file_norm = os.path.splitext(image_key)[0] + '.npz'\n if os.path.exists(npz_file_norm):\n if random.random() < .5:\n npz_file_flip = os.path.splitext(image_key)[0] + '_flip.npz'\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n npz_file_norm = os.path.join(self.train_data_dir, image_key + '.npz')\n if random.random() < .5:\n npz_file_flip = os.path.join(self.train_data_dir, image_key + '_flip.npz')\n if os.path.exists(npz_file_flip):\n return npz_file_flip\n return npz_file_norm\n\n def load_latent(self, image_key):\n return np.load(self.image_key_to_npz_file(image_key))['arr_0']\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n if index == 0:\n self.shuffle_buckets()\n\n bucket = self.buckets[self.buckets_indices[index][0]]\n image_index = self.buckets_indices[index][1] * self.batch_size\n\n input_ids_list = []\n latents_list = []\n captions = []\n for image_key in bucket[image_index:image_index + self.batch_size]:\n img_md = self.metadata[image_key]\n caption = img_md.get('caption')\n tags = img_md.get('tags')\n\n if caption is None:\n caption = tags\n elif tags is not None and len(tags) > 0:\n caption = caption + ', ' + tags\n assert caption is not None and len(caption) > 0, f\"caption or tag is required / キャプションまたはタグは必須です:{image_key}\"\n\n latents = self.load_latent(image_key)\n\n if self.shuffle_caption:\n tokens = caption.strip().split(\",\")\n if self.shuffle_keep_tokens is None:\n random.shuffle(tokens)\n else:\n if len(tokens) > self.shuffle_keep_tokens:\n keep_tokens = tokens[:self.shuffle_keep_tokens]\n tokens = tokens[self.shuffle_keep_tokens:]\n random.shuffle(tokens)\n tokens = keep_tokens + tokens\n caption = \",\".join(tokens).strip()\n\n captions.append(caption)\n\n input_ids = self.tokenizer(caption, padding=\"max_length\", truncation=True,\n max_length=self.tokenizer_max_length, return_tensors=\"pt\").input_ids\n\n if self.tokenizer_max_length > self.tokenizer.model_max_length:\n input_ids = input_ids.squeeze(0)\n iids_list = []\n if self.tokenizer.pad_token_id == self.tokenizer.eos_token_id:\n # v1\n # 77以上の時は \"<BOS> .... <EOS> <EOS> <EOS>\" でトータル227とかになっているので、\"<BOS>...<EOS>\"の三連に変換する\n # 1111氏のやつは , で区切る、とかしているようだが とりあえず単純に\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2): # (1, 152, 75)\n ids_chunk = (input_ids[0].unsqueeze(0),\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0))\n ids_chunk = torch.cat(ids_chunk)\n iids_list.append(ids_chunk)\n else:\n # v2\n # 77以上の時は \"<BOS> .... 
<EOS> <PAD> <PAD>...\" でトータル227とかになっているので、\"<BOS>...<EOS> <PAD> <PAD> ...\"の三連に変換する\n for i in range(1, self.tokenizer_max_length - self.tokenizer.model_max_length + 2, self.tokenizer.model_max_length - 2):\n ids_chunk = (input_ids[0].unsqueeze(0), # BOS\n input_ids[i:i + self.tokenizer.model_max_length - 2],\n input_ids[-1].unsqueeze(0)) # PAD or EOS\n ids_chunk = torch.cat(ids_chunk)\n\n # 末尾が <EOS> <PAD> または <PAD> <PAD> の場合は、何もしなくてよい\n # 末尾が x <PAD/EOS> の場合は末尾を <EOS> に変える(x <EOS> なら結果的に変化なし)\n if ids_chunk[-2] != self.tokenizer.eos_token_id and ids_chunk[-2] != self.tokenizer.pad_token_id:\n ids_chunk[-1] = self.tokenizer.eos_token_id\n # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... に変える\n if ids_chunk[1] == self.tokenizer.pad_token_id:\n ids_chunk[1] = self.tokenizer.eos_token_id\n\n iids_list.append(ids_chunk)\n\n input_ids = torch.stack(iids_list) # 3,77\n\n input_ids_list.append(input_ids)\n latents_list.append(torch.FloatTensor(latents))\n\n example = {}\n example['input_ids'] = torch.stack(input_ids_list)\n example['latents'] = torch.stack(latents_list)\n if self.debug:\n example['image_keys'] = bucket[image_index:image_index + self.batch_size]\n example['captions'] = captions\n return example\n\n\ndef save_hypernetwork(output_file, hypernetwork):\n state_dict = hypernetwork.get_state_dict()\n torch.save(state_dict, output_file)\n\n\ndef train(args):\n fine_tuning = args.hypernetwork_module is None # fine tuning or hypernetwork training\n\n # その他のオプション設定を確認する\n if args.v_parameterization and not args.v2:\n print(\"v_parameterization should be with v2 / v1でv_parameterizationを使用することは想定されていません\")\n if args.v2 and args.clip_skip is not None:\n print(\"v2 with clip_skip will be unexpected / v2でclip_skipを使用することは想定されていません\")\n\n # モデル形式のオプション設定を確認する\n load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path)\n\n if load_stable_diffusion_format:\n src_stable_diffusion_ckpt = args.pretrained_model_name_or_path\n src_diffusers_model_path = None\n else:\n src_stable_diffusion_ckpt = None\n src_diffusers_model_path = args.pretrained_model_name_or_path\n\n if args.save_model_as is None:\n save_stable_diffusion_format = load_stable_diffusion_format\n use_safetensors = args.use_safetensors\n else:\n save_stable_diffusion_format = args.save_model_as.lower() == 'ckpt' or args.save_model_as.lower() == 'safetensors'\n use_safetensors = args.use_safetensors or (\"safetensors\" in args.save_model_as.lower())\n\n # 乱数系列を初期化する\n if args.seed is not None:\n set_seed(args.seed)\n\n # メタデータを読み込む\n if os.path.exists(args.in_json):\n print(f\"loading existing metadata: {args.in_json}\")\n with open(args.in_json, \"rt\", encoding='utf-8') as f:\n metadata = json.load(f)\n else:\n print(f\"no metadata / メタデータファイルがありません: {args.in_json}\")\n return\n\n # tokenizerを読み込む\n print(\"prepare tokenizer\")\n if args.v2:\n tokenizer = CLIPTokenizer.from_pretrained(V2_STABLE_DIFFUSION_PATH, subfolder=\"tokenizer\")\n else:\n tokenizer = CLIPTokenizer.from_pretrained(TOKENIZER_PATH)\n\n if args.max_token_length is not None:\n print(f\"update token length: {args.max_token_length}\")\n\n # datasetを用意する\n print(\"prepare dataset\")\n train_dataset = FineTuningDataset(metadata, args.train_data_dir, args.train_batch_size,\n tokenizer, args.max_token_length, args.shuffle_caption, args.keep_tokens,\n args.dataset_repeats, args.debug_dataset)\n\n print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")\n print(f\"Total images / 画像数: {train_dataset.images_count}\")\n\n if len(train_dataset) == 
0:\n print(\"No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。\")\n return\n\n if args.debug_dataset:\n train_dataset.show_buckets()\n i = 0\n for example in train_dataset:\n print(f\"image: {example['image_keys']}\")\n print(f\"captions: {example['captions']}\")\n print(f\"latents: {example['latents'].shape}\")\n print(f\"input_ids: {example['input_ids'].shape}\")\n print(example['input_ids'])\n i += 1\n if i >= 8:\n break\n return\n\n # acceleratorを準備する\n print(\"prepare accelerator\")\n if args.logging_dir is None:\n log_with = None\n logging_dir = None\n else:\n log_with = \"tensorboard\"\n log_prefix = \"\" if args.log_prefix is None else args.log_prefix\n logging_dir = args.logging_dir + \"/\" + log_prefix + time.strftime('%Y%m%d%H%M%S', time.localtime())\n accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision, log_with=log_with, logging_dir=logging_dir)\n\n # accelerateの互換性問題を解決する\n accelerator_0_15 = True\n try:\n accelerator.unwrap_model(\"dummy\", True)\n print(\"Using accelerator 0.15.0 or above.\")\n except TypeError:\n accelerator_0_15 = False\n\n def unwrap_model(model):\n if accelerator_0_15:\n return accelerator.unwrap_model(model, True)\n return accelerator.unwrap_model(model)\n\n # mixed precisionに対応した型を用意しておき適宜castする\n weight_dtype = torch.float32\n if args.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif args.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n save_dtype = None\n if args.save_precision == \"fp16\":\n save_dtype = torch.float16\n elif args.save_precision == \"bf16\":\n save_dtype = torch.bfloat16\n elif args.save_precision == \"float\":\n save_dtype = torch.float32\n\n # モデルを読み込む\n if load_stable_diffusion_format:\n print(\"load StableDiffusion checkpoint\")\n text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)\n else:\n print(\"load Diffusers pretrained models\")\n pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None)\n # , torch_dtype=weight_dtype) ここでtorch_dtypeを指定すると学習時にエラーになる\n text_encoder = pipe.text_encoder\n unet = pipe.unet\n vae = pipe.vae\n del pipe\n vae.to(\"cpu\") # 保存時にしか使わないので、メモリを開けるためCPUに移しておく\n\n # Diffusers版のxformers使用フラグを設定する関数\n def set_diffusers_xformers_flag(model, valid):\n # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう\n # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`)\n # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか\n # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^)\n", "type": "random" } ]
[ " if not os.path.exists(self.image_key_to_npz_file(image_key)):", " parser.add_argument(\"--log_prefix\", type=str, default=None, help=\"add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列\")", " parser.add_argument(\"--mixed_precision\", type=str, default=\"no\",", " train_dataset.show_buckets()", " parser.add_argument(\"--seed\", type=int, default=None, help=\"random seed for training / 学習時の乱数のseed\")", " parser.add_argument(\"--train_data_dir\", type=str, default=None, help=\"directory for train images / 学習画像データのディレクトリ\")", " parser.add_argument(\"--train_batch_size\", type=int, default=1,", " parser.add_argument(\"--learning_rate\", type=float, default=2.0e-6, help=\"learning rate / 学習率\")", " if os.path.exists(npz_file_flip):", " parser.add_argument(\"--save_model_as\", type=str, default=None, choices=[None, \"ckpt\", \"safetensors\", \"diffusers\", \"diffusers_safetensors\"],", " parser.add_argument(\"--logging_dir\", type=str, default=None,", " parser.add_argument(\"--max_token_length\", type=int, default=None, choices=[None, 150, 225],", " accelerator.unwrap_model(\"dummy\", True)", " accelerator.backward(loss)", " parser.add_argument(\"--dataset_repeats\", type=int, default=None, help=\"num times to repeat dataset / 学習にデータセットを繰り返す回数\")", " if exists(row_mask):", " parser.add_argument(\"--save_precision\", type=str, default=None,", " parser.add_argument(\"--hypernetwork_module\", type=str, default=None,", " latents = self.load_latent(image_key)", " save_hypernetwork(ckpt_file, hypernetwork)", " parser.add_argument(\"--keep_tokens\", type=int, default=None,", " replace_unet_cross_attn_to_xformers()", " parser.add_argument(\"--output_dir\", type=str, default=None,", " replace_unet_cross_attn_to_memory_efficient()", " save_hypernetwork(ckpt_file, unwrap_model(hypernetwork))", " if not exists(mask):", " parser.add_argument(\"--lr_scheduler\", type=str, default=\"constant\",", " parser.add_argument(\"--save_every_n_epochs\", type=int, default=None,", " parser.add_argument(\"--in_json\", type=str, default=None, help=\"metadata file to input / 読みこむメタデータファイル\")", " hypernetwork = unwrap_model(hypernetwork)", " parser.add_argument(\"--resume\", type=str, default=None,", " parser.add_argument(\"--pretrained_model_name_or_path\", type=str, default=None,", " context = default(context, x)", " parser.add_argument(\"--max_train_steps\", type=int, default=1600, help=\"training steps / 学習ステップ数\")", " unet = unwrap_model(unet)", " parser.add_argument(\"--lr_warmup_steps\", type=int, default=0,", " if os.path.exists(npz_file_norm):", " model_util.save_diffusers_checkpoint(args.v2, out_dir, text_encoder, unet,", " text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path)", " bucket = self.buckets[self.buckets_indices[index][0]]", " image_index = self.buckets_indices[index][1] * self.batch_size", " caption = img_md.get('caption')", " tags = img_md.get('tags')", " metadata = json.load(f)", " if index == 0:", " train(args)", " super().__init__()", " m.train()", " diffusers.models.attention.CrossAttention.forward = forward_flash_attn", " context_k, context_v = self.hypernetwork.forward(x, context)", " diffusers.models.attention.CrossAttention.forward = forward_xformers", " ids_chunk = torch.cat(ids_chunk)", " # 先頭が <BOS> <PAD> ... の場合は <BOS> <EOS> <PAD> ... 
に変える", " model_util.save_stable_diffusion_checkpoint(args.v2, ckpt_file, text_encoder, unet,", " print(f\"Total dataset length / データセットの長さ: {len(train_dataset)}\")", "", " target = noise", " text_encoder.eval()", " params_to_optimize = params", " dv = torch.zeros_like(v)", " # Recursively walk through all the children." ]
METASEP
4
jacebrowning__memegen
jacebrowning__memegen METASEP scripts/simulate_load.py METASEP """ poetry run locust -f scripts/simulate_load.py """ from random import randint from urllib.parse import quote from locust import HttpUser, constant_pacing, task class Client(HttpUser): host = "http://localhost:5000" wait_time = constant_pacing(10) @task(10) def image_from_template(self): x = randint(1000000, 9999999) path = f"/images/fry/test-{x}.jpg" self.client.get(path) @task(1) def image_from_template_large(self): x = randint(1000000, 9999999) path = f"/images/fry/test-{x}.png?height=1000" self.client.get(path) @task(3) def image_from_custom(self): x = randint(1000000, 9999999) background = quote(f"https://memegen.link/img/grid.png?x={x}") path = f"/images/custom/test-{x}.png?height=1000&background={background}" self.client.get(path) scripts/check_deployment.py METASEP import os import pytest import requests @pytest.fixture(scope="session") def url(): return os.getenv("SITE", "http://localhost:5000") def test_get_templates(expect, url): response = requests.get(f"{url}/templates") expect(response.status_code) == 200 def test_post_images(expect, url): params = {"template_id": "iw", "text_lines": ["test", "deployment"]} response = requests.post(f"{url}/images", json=params) expect(response.status_code) == 201 expect(response.json()["url"]).endswith("/images/iw/test/deployment.png") def test_get_examples(expect, url): response = requests.get(f"{url}/examples") expect(response.status_code) == 200 def test_get_image(expect, url): response = requests.get(f"{url}/images/iw/tests_code/in_production.jpg") expect(response.status_code) == 200 expect(response.headers["Content-Type"]) == "image/jpeg" def test_get_image_custom(expect, url): response = requests.get( f"{url}/images/custom/test.png" "?alt=https://www.gstatic.com/webp/gallery/1.jpg" ) expect(response.status_code) == 200 expect(response.headers["Content-Type"]) == "image/png" def test_swagger(expect, url): response = requests.get( f"https://validator.swagger.io/validator/debug?url=" f"{url}%2Fdocs%2Fswagger.json" ) expect(response.status_code) == 200 expect(response.json()) == {} app/utils/urls.py METASEP from urllib.parse import unquote, urlencode from furl import furl from .. import settings FLAGS = { "0": False, "1": True, "false": False, "no": False, "true": True, "yes": True, } def schema(value) -> bool: return value and "://" in value def arg(data: dict, default, *names: str): for name in names: value = data.get(name) if value is not None: return value return default def flag(request, name, default=None): value = request.args.get(name, "").lower() return FLAGS.get(value, default) def add(url: str, **kwargs): joiner = "&" if "?" in url else "?" return url + joiner + urlencode(kwargs) def normalize(url: str) -> str: original = furl(url) normalized = furl(f"{settings.BASE_URL}{original.path}") if "background" in original.args: normalized.args["background"] = original.args["background"] return clean(str(normalized)) def params(**kwargs) -> dict: return {k: v for k, v in kwargs.items() if v} def clean(url: str) -> str: # Replace percent-encoded characters url = unquote(url) # Replace invalid regex escape sequences url = url.replace("\\", "~b") # Drop trailing spaces while "/_." 
in url: url = url.replace("/_.", ".") return url app/utils/text.py METASEP import hashlib import re from urllib.parse import unquote def encode(lines: list[str]) -> str: encoded_lines = [] for line in lines: if line == "/": encoded_lines.append("_") elif line: encoded = unquote(line) for before, after in [ ("_", "__"), ("-", "--"), (" ", "_"), ("?", "~q"), ("%", "~p"), ("#", "~h"), ('"', "''"), ("/", "~s"), ("\\", "~b"), ("\n", "~n"), ("&", "~a"), ("<", "~l"), (">", "~g"), ("‘", "'"), ("’", "'"), ("“", '"'), ("”", '"'), ("–", "-"), ]: encoded = encoded.replace(before, after) encoded_lines.append(encoded) else: encoded_lines.append("_") slug = "/".join(encoded_lines) return slug or "_" def decode(slug: str) -> list[str]: has_dash = "_----" in slug has_arrow = "_--~g" in slug slug = slug.replace("_", " ").replace(" ", "_") slug = slug.replace("-", " ").replace(" ", "-") slug = slug.replace("''", '"') if has_dash: slug = slug.replace("-- ", " --") if has_arrow: slug = slug.replace("- ~g", " -~g") for before, after in [ ("~q", "?"), ("~p", "%"), ("~h", "#"), ("~n", "\n"), ("~a", "&"), ("~l", "<"), ("~g", ">"), ("~b", "\\"), ]: slug = slug.replace(before, after) lines = slug.split("/") lines = [line.replace("~s", "/") for line in lines] return lines def normalize(slug: str) -> tuple[str, bool]: slug = unquote(slug) normalized_slug = encode(decode(slug)) return normalized_slug, slug != normalized_slug def fingerprint(value: str, *, prefix="_custom-", suffix="") -> str: return prefix + hashlib.sha1(value.encode()).hexdigest() + suffix def slugify(value: str) -> str: return re.sub(r"[^a-z0-9-]", "", value).strip("-") app/utils/meta.py METASEP from pathlib import Path from urllib.parse import unquote import aiohttp from aiocache import cached from sanic.log import logger from .. 
import settings


def version() -> str:
    changelog_lines = Path("CHANGELOG.md").read_text().splitlines()
    version_heading = changelog_lines[2]
    return version_heading.split(" ")[-1]


@cached(ttl=60 * 15 if settings.DEPLOYED else 0)
async def authenticate(request) -> dict:
    info: dict = {}

    if settings.REMOTE_TRACKING_URL:
        api = settings.REMOTE_TRACKING_URL + "auth"
    else:
        return info

    api_key = _get_api_key(request)
    if api_key:
        api_mask = api_key[:2] + "***" + api_key[-2:]
        logger.info(f"Authenticating with API key: {api_mask}")
        async with aiohttp.ClientSession() as session:
            response = await session.get(api, headers={"X-API-KEY": api_key})
            if response.status >= 500:
                settings.REMOTE_TRACKING_ERRORS += 1
                logger.warning(
                    f"Tracker error count: {settings.REMOTE_TRACKING_ERRORS}"
                )
            else:
                info = await response.json()

    return info


@cached(ttl=60 * 15 if settings.DEPLOYED else 0)
async def tokenize(request, url: str) -> tuple[str, bool]:
    api_key = _get_api_key(request) or ""
    token = request.args.get("token")
    default_url = url.replace(f"api_key={api_key}", "").replace("?&", "?").strip("?&")

    if api_key == "myapikey42" and "example.png" not in url:
        logger.warning(f"Example API key used to tokenize: {url}")
        return default_url, True

    if settings.REMOTE_TRACKING_URL:
        api = settings.REMOTE_TRACKING_URL + "tokenize"
    else:
        return url, False

    if api_key or token:
        async with aiohttp.ClientSession() as session:
            response = await session.post(
                api, data={"url": default_url}, headers={"X-API-KEY": api_key}
            )
            if response.status >= 500:
                settings.REMOTE_TRACKING_ERRORS += 1
                logger.warning(
                    f"Tracker error count: {settings.REMOTE_TRACKING_ERRORS}"
                )
                return default_url, False
            data = await response.json()
            return data["url"], data["url"] != url

    return url, False


async def custom_watermarks_allowed(request) -> bool:
    info = await authenticate(request)
    if info.get("image_access", False):
        return True

    token = request.args.get("token")
    if token:
        logger.info(f"Authenticating with token: {token}")
        _url, updated = await tokenize(request, request.url)
        return not updated

    return False


async def get_watermark(request) -> tuple[str, bool]:
    watermark = request.args.get("watermark", "")

    if await custom_watermarks_allowed(request):
        if watermark == settings.DISABLED_WATERMARK:
            return "", False
        return watermark, False

    if watermark:
        if watermark == settings.DEFAULT_WATERMARK:
            logger.warning(f"Redundant watermark: {watermark}")
            return settings.DEFAULT_WATERMARK, True
        if watermark in settings.ALLOWED_WATERMARKS:
            return watermark, False
        logger.warning(f"Invalid watermark: {watermark}")
        return settings.DEFAULT_WATERMARK, True

    return settings.DEFAULT_WATERMARK, False


async def track(request, lines: list[str]):
    if settings.TRACK_REQUESTS and settings.REMOTE_TRACKING_URL:
        api = settings.REMOTE_TRACKING_URL
    else:
        return

    text = " ".join(lines).strip()
    if not text:
        return
    if any(name in request.args for name in ["height", "width", "watermark"]):
        return
    if "localhost" in getattr(request, "host", "localhost"):
        return

    async with aiohttp.ClientSession() as session:
        params = dict(
            text=text,
            referer=_get_referer(request) or settings.BASE_URL,
            result=unquote(request.url),
        )
        logger.info(f"Tracking request: {params}")
        headers = {"X-API-KEY": _get_api_key(request) or ""}
        response = await session.get(api, params=params, headers=headers)
        if response.status != 200:
            try:
                message = await response.json()
            except aiohttp.client_exceptions.ContentTypeError:
                message = await response.text()
            logger.error(f"Tracker response {response.status}: {message}")
            if response.status >= 404 and response.status not in {414, 421, 520}:
                settings.REMOTE_TRACKING_ERRORS += 1
                logger.warning(f"Tracker error count: {settings.REMOTE_TRACKING_ERRORS}")
                if settings.REMOTE_TRACKING_ERRORS >= settings.REMOTE_TRACKING_ERRORS_LIMIT:
                    settings.TRACK_REQUESTS = False
                    logger.warning(f"Disabled tracking after {response.status} response")


async def search(request, text: str, safe: bool, *, mode="") -> list[dict]:
    if settings.REMOTE_TRACKING_URL:
        api = settings.REMOTE_TRACKING_URL + mode
    else:
        return []

    async with aiohttp.ClientSession() as session:
        params = dict(
            text=text,
            nsfw=0 if safe else 1,
            referer=_get_referer(request) or settings.BASE_URL,
        )
        logger.info(f"Searching for results: {text!r} (safe={safe})")
        headers = {"X-API-KEY": _get_api_key(request) or ""}
        response = await session.get(api, params=params, headers=headers)
        if response.status >= 500:
            settings.REMOTE_TRACKING_ERRORS += 1
            logger.warning(f"Tracker error count: {settings.REMOTE_TRACKING_ERRORS}")
            return []
        data = await response.json()
        if response.status == 200:
            return data
        logger.error(f"Search response: {data}")
        return []


def _get_referer(request):
    return request.headers.get("referer") or request.args.get("referer")


def _get_api_key(request):
    return request.headers.get("x-api-key") or request.args.get("api_key")

app/utils/images.py METASEP

from __future__ import annotations

import io
from pathlib import Path
from typing import Iterator

from PIL import (
    Image,
    ImageDraw,
    ImageFilter,
    ImageFont,
    ImageOps,
    ImageSequence,
    UnidentifiedImageError,
)
from sanic.log import logger

from .. import settings
from ..models import Font, Template, Text
from ..types import Dimensions, Offset, Point

EXCEPTIONS = (
    OSError,
    SyntaxError,
    Image.DecompressionBombError,
    UnidentifiedImageError,
)


def preview(
    template: Template,
    lines: list[str],
    *,
    style: str = settings.DEFAULT_STYLE,
    watermark: str = "",
) -> tuple[bytes, str]:
    path = template.build_path(lines, "", style, settings.PREVIEW_SIZE, "", "jpg")
    logger.info(f"Previewing meme for {path}")

    image = render_image(
        template,
        style,
        lines,
        settings.PREVIEW_SIZE,
        pad=False,
        is_preview=True,
        watermark=watermark,
    )

    stream = io.BytesIO()
    image.convert("RGB").save(stream, format="JPEG", quality=50)
    return stream.getvalue(), "image/jpeg"


def save(
    template: Template,
    lines: list[str],
    watermark: str = "",
    *,
    font_name: str = "",
    extension: str = settings.DEFAULT_EXTENSION,
    style: str = settings.DEFAULT_STYLE,
    size: Dimensions = (0, 0),
    maximum_frames: int = 0,
    directory: Path = settings.IMAGES_DIRECTORY,
) -> Path:
    size = fit_image(*size)
    path = directory / template.build_path(
        lines, font_name, style, size, watermark, extension, maximum_frames
    )

    if path.exists():
        if settings.DEPLOYED:
            logger.info(f"Loading meme from {path}")
            return path
        logger.info(f"Reloading meme at {path}")
    else:
        logger.info(f"Saving meme to {path}")
        path.parent.mkdir(parents=True, exist_ok=True)

    if extension == "gif":
        frames, duration = render_animation(
            template, lines, size, font_name, maximum_frames, watermark=watermark
        )
        frames[0].save(
            path, save_all=True, append_images=frames[1:], duration=duration, loop=0
        )
    else:
        image = render_image(
            template, style, lines, size, font_name, watermark=watermark
        )
        image.convert("RGB").save(path, quality=95)

    return path


def load(path: Path) -> Image:
    image = Image.open(path).convert("RGBA")
    image = ImageOps.exif_transpose(image)
    return image


def embed(template: Template, index: int, foreground_path: Path, background_path: Path):
    try:
        overlay = template.overlay[index]
    except IndexError:
        count = len(template.overlay)
        logger.error(f"Template {template.id!r} only supports {count} overlay(s)")
        overlay = template.overlay[count - 1]

    background = load(background_path)
    foreground = load(foreground_path)

    size = overlay.get_size(background.size)
    foreground.thumbnail(size)

    foreground = foreground.rotate(overlay.angle, expand=True)
    x1, y1, _x2, _y2 = overlay.get_box(background.size, foreground.size)
    background.paste(foreground, (x1, y1), mask=foreground.convert("RGBA"))

    background.convert("RGB").save(background_path)


def render_image(
    template: Template,
    style: str,
    lines: list[str],
    size: Dimensions,
    font_name: str = "",
    *,
    pad: bool | None = None,
    is_preview: bool = False,
    watermark: str = "",
) -> Image:
    background = load(template.get_image(style))

    pad = all(size) if pad is None else pad
    image = resize_image(background, *size, pad, expand=True)

    if any(
        (
            size[0] and size[0] <= settings.PREVIEW_SIZE[0],
            size[1] and size[1] <= settings.PREVIEW_SIZE[1],
        )
    ) and not (is_preview or settings.DEBUG):
        watermark = ""

    for (
        point,
        offset,
        text,
        max_text_size,
        text_fill,
        font,
        stroke_width,
        stroke_fill,
        angle,
    ) in get_image_elements(
        template, lines, font_name, watermark, image.size, is_preview
    ):
        box = Image.new("RGBA", max_text_size)
        draw = ImageDraw.Draw(box)

        if settings.DEBUG:
            xy = (0, 0, max_text_size[0] - 1, max_text_size[1] - 1)
            outline = "orange" if text == settings.PREVIEW_TEXT else "lime"
            draw.rectangle(xy, outline=outline)

        rows = text.count("\n") + 1
        draw.text(
            (-offset[0], -offset[1]),
            text,
            text_fill,
            font,
            spacing=-offset[1] / (rows * 2),
            align="center",
            stroke_width=stroke_width,
            stroke_fill=stroke_fill,
        )

        box = box.rotate(angle, resample=Image.BICUBIC, expand=True)
        image.paste(box, point, box)

    if settings.DEBUG:
        for overlay in template.overlay:
            box = Image.new("RGBA", overlay.get_size(image.size))
            draw = ImageDraw.Draw(box)
            draw.rectangle((0, 0, box.width - 1, box.height - 1), outline="fuchsia")

            # This offset math is inexact, but works well enough to see
            # approximately where rotated overlay images will be placed.
            # TODO: implement a proper solution using trigonometry.
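            # --- Added note (not from the original codebase): a hedged sketch of the
            # trigonometry the TODO above asks for. Image.rotate() with expand=True
            # grows the canvas symmetrically around the image centre, so keeping the
            # overlay's centre fixed means shifting the paste point by half of that
            # growth. Assumes `import math` and the pre-rotation box dimensions:
            #
            #   theta = math.radians(overlay.angle)
            #   w, h = box.size
            #   new_w = w * abs(math.cos(theta)) + h * abs(math.sin(theta))
            #   new_h = w * abs(math.sin(theta)) + h * abs(math.cos(theta))
            #   offset = (int((new_w - w) / 2), int((new_h - h) / 2))
            #
            # The heuristic below is the project's existing approximation.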
angle = abs(overlay.angle) if angle > 45: angle = 90 - angle offset = ( int(angle + (22.5 - angle) / 22.5 - 1), int(angle + (22.5 - angle) / 22.5 - 1), ) x1, y1, _x2, _y2 = overlay.get_box(image.size) point = (x1 - offset[0], y1 - offset[1]) box = box.rotate(overlay.angle, expand=True) image.paste(box, point, mask=box) if pad: image = add_blurred_background(image, background, *size) if watermark: image = add_watermark(image, watermark, is_preview) return image def render_animation( template: Template, lines: list[str], size: Dimensions, font_name: str = "", maximum_frames: int = 0, *, pad: bool | None = None, is_preview: bool = False, watermark: str = "", ) -> tuple[list[Image], int]: frames = [] pad = all(size) if pad is None else pad source = Image.open(template.get_image(style="animated")) total = getattr(source, "n_frames", 1) if maximum_frames >= total: modulus = 1.0 elif maximum_frames: modulus = max(1.0, round(total / maximum_frames, 1)) else: scale = min(2.0, settings.DEFAULT_SIZE[1] / size[1] if size[1] else 1.0) modulus = max(1.0, round(total / (settings.MAXIMUM_FRAMES * scale), 1)) if any( ( size[0] and size[0] <= settings.PREVIEW_SIZE[0], size[1] and size[1] <= settings.PREVIEW_SIZE[1], ) ) and not (is_preview or settings.DEBUG): watermark = "" for index, frame in enumerate(ImageSequence.Iterator(source)): if (index % modulus) >= 1: continue stream = io.BytesIO() frame.save(stream, format="GIF") background = Image.open(stream).convert("RGBA") image = resize_image(background, *size, pad, expand=False) for ( point, offset, text, max_text_size, text_fill, font, stroke_width, stroke_fill, angle, ) in get_image_elements( template, lines, font_name, watermark, image.size, is_preview, index / total ): box = Image.new("RGBA", max_text_size) draw = ImageDraw.Draw(box) if settings.DEBUG: xy = (0, 0, max_text_size[0] - 1, max_text_size[1] - 1) outline = "orange" if text == settings.PREVIEW_TEXT else "lime" draw.rectangle(xy, outline=outline) rows = text.count("\n") + 1 draw.text( (-offset[0], -offset[1]), text, text_fill, font, spacing=-offset[1] / (rows * 2), align="center", stroke_width=stroke_width, stroke_fill=stroke_fill, ) box = box.rotate(angle, resample=Image.BICUBIC, expand=True) image.paste(box, point, box) if settings.DEBUG: draw = ImageDraw.Draw(image) for overlay in template.overlay: xy = overlay.get_box(image.size) draw.rectangle(xy, outline="fuchsia") if pad: image = add_blurred_background(image, background, *size) if watermark: image = add_watermark(image, watermark, is_preview) if settings.DEBUG: image = add_counter(image, index, total, modulus) frames.append(image) ratio = len(frames) / max(total, settings.MAXIMUM_FRAMES) duration = source.info.get("duration", 100) / ratio return frames, duration def resize_image( image: Image, width: int, height: int, pad: bool, *, expand: bool ) -> Image: ratio = image.width / image.height default_width, default_height = settings.DEFAULT_SIZE if pad: if width < height * ratio: size = width, int(width / ratio) else: size = int(height * ratio), height elif width: size = width, int(width / ratio) elif height: size = int(height * ratio), height elif ratio < 1.0: if expand: size = default_width, int(default_height / ratio) else: size = int(default_width * ratio), default_height else: if expand: size = int(default_width * ratio), default_height else: size = default_width, int(default_height / ratio) image = image.resize(size, Image.LANCZOS) return image def fit_image(width: float, height: float) -> tuple[int, int]: while width * height > 
settings.MAXIMUM_PIXELS: width *= 0.75 height *= 0.75 return int(width), int(height) def add_blurred_background( foreground: Image, background: Image, width: int, height: int ) -> Image: base_width, base_height = foreground.size border_width = min(width, base_width + 2) border_height = min(height, base_height + 2) border_dimensions = border_width, border_height border = Image.new("RGB", border_dimensions) border.paste( foreground, ((border_width - base_width) // 2, (border_height - base_height) // 2), ) padded = background.resize((width, height), Image.LANCZOS) darkened = padded.point(lambda p: int(p * 0.4)) blurred = darkened.filter(ImageFilter.GaussianBlur(5)) blurred_width, blurred_height = blurred.size offset = ( (blurred_width - border_width) // 2, (blurred_height - border_height) // 2, ) blurred.paste(border, offset) return blurred def add_watermark(image: Image, text: str, is_preview: bool) -> Image: size = (image.size[0], settings.WATERMARK_HEIGHT) font = get_font("tiny", text, 0.0, size, 99) offset = get_text_offset(text, font, size) watermark = Text.get_error() if is_preview else Text.get_watermark() stroke_width = get_stroke_width(font) stroke_width, stroke_fill = watermark.get_stroke(stroke_width) box = Image.new("RGBA", image.size) draw = ImageDraw.Draw(box) draw.text( (3, image.size[1] - size[1] - offset[1] - 1), text, watermark.color, font, stroke_width=stroke_width, stroke_fill=stroke_fill, ) return Image.alpha_composite(image, box) def add_counter(image: Image, index: int, total: int, modulus: float) -> Image: size = (image.size[0], settings.WATERMARK_HEIGHT) text = f"{index+1:02} of {total:02} / {modulus}" font = get_font("tiny", text, 0.0, size, 99) box = Image.new("RGBA", image.size) draw = ImageDraw.Draw(box) draw.text((3, -3), text, "lime", font, stroke_width=1, stroke_fill="black") return Image.alpha_composite(image, box) def get_image_elements( template: Template, lines: list[str], font_name: str, watermark: str, image_size: Dimensions, is_preview: bool = False, percent_rendered: float | None = None, ) -> Iterator[tuple[Point, Offset, str, Dimensions, str, ImageFont, int, str, float]]: for index, text in enumerate(template.text): if percent_rendered is None: yield get_image_element( lines, index, text, font_name, image_size, watermark ) elif not text.stop or (text.start <= percent_rendered <= text.stop): yield get_image_element( lines, index, text, font_name, image_size, watermark ) else: yield get_image_element([], index, text, font_name, image_size, watermark) if is_preview: lines = [settings.PREVIEW_TEXT] index = 0 text = Text.get_preview() yield get_image_element(lines, index, text, "", image_size, watermark) def get_image_element( lines: list[str], index: int, text: Text, font_name: str, image_size: Dimensions, watermark: str, ) -> tuple[Point, Offset, str, Dimensions, str, ImageFont, int, str, float]: point = text.get_anchor(image_size, watermark) max_text_size = text.get_size(image_size) max_font_size = int(image_size[1] / (4 if text.angle else 9)) try: line = lines[index] except IndexError: line = "" else: line = text.stylize( wrap(font_name or text.font, line, max_text_size, max_font_size), lines=lines, ) font = get_font( font_name or text.font, line, text.angle, max_text_size, max_font_size ) offset = get_text_offset(line, font, max_text_size) stroke_width, stroke_fill = text.get_stroke(get_stroke_width(font)) return ( point, offset, line, max_text_size, text.color, font, stroke_width, stroke_fill, text.angle, ) def wrap(font: str, line: str, 
max_text_size: Dimensions, max_font_size: int) -> str: lines_1 = line lines_2 = split_2(line) lines_3 = split_3(line) font_1 = get_font(font, lines_1, 0, max_text_size, max_font_size) font_2 = get_font(font, lines_2, 0, max_text_size, max_font_size) font_3 = get_font(font, lines_3, 0, max_text_size, max_font_size) if font_1.size == font_2.size and font_2.size <= settings.MINIMUM_FONT_SIZE: return lines_2 if font_1.size >= font_2.size: return lines_1 if get_text_size(lines_3, font_3)[0] >= max_text_size[0] * 0.60: return lines_3 if get_text_size(lines_2, font_2)[0] >= max_text_size[0] * 0.60: return lines_2 return lines_1 def split_2(line: str) -> str: midpoint = len(line) // 2 - 1 for offset in range(0, len(line) // 4): for index in [midpoint - offset, midpoint + offset]: if line[index] == " ": return line[:index].strip() + "\n" + line[index:].strip() return line def split_3(line: str) -> str: max_len = len(line) / 3 words = line.split(" ") lines = ["", "", ""] index = 0 for word in words: current_len = len(lines[index]) next_len = current_len + len(word) * 0.7 if next_len > max_len: if index < 2: index += 1 lines[index] += word + " " return "\n".join(lines).strip() def get_font( name: str, text: str, angle: float, max_text_size: Dimensions, max_font_size: int, ) -> ImageFont: font_path = Font.objects.get(name or settings.DEFAULT_FONT).path max_text_width = max_text_size[0] - max_text_size[0] / 35 max_text_height = max_text_size[1] - max_text_size[1] / 10 for size in range(max(settings.MINIMUM_FONT_SIZE, max_font_size), 6, -1): font = ImageFont.truetype(str(font_path), size=size) text_width, text_height = get_text_size_minus_font_offset(text, font) if text_width <= max_text_width and text_height <= max_text_height: break return font def get_text_size_minus_font_offset(text: str, font: ImageFont) -> Dimensions: text_width, text_height = get_text_size(text, font) offset = font.getoffset(text) return text_width - offset[0], text_height - offset[1] def get_text_offset(text: str, font: ImageFont, max_text_size: Dimensions) -> Offset: text_size = get_text_size(text, font) stroke_width = get_stroke_width(font) x_offset, y_offset = font.getoffset(text) x_offset -= stroke_width y_offset -= stroke_width rows = text.count("\n") + 1 if rows >= 3: y_adjust = 1.1 else: y_adjust = 1 + (3 - rows) * 0.25 x_offset -= (max_text_size[0] - text_size[0]) / 2 y_offset -= (max_text_size[1] - text_size[1] / y_adjust) / 2 return x_offset, y_offset def get_text_size(text: str, font: ImageFont) -> Dimensions: image = Image.new("RGB", (100, 100)) draw = ImageDraw.Draw(image) text_size = draw.textsize(text, font) stroke_width = get_stroke_width(font) return text_size[0] + stroke_width, text_size[1] + stroke_width def get_stroke_width(font: ImageFont) -> int: return min(3, max(1, font.size // 12)) app/utils/http.py METASEP import asyncio import aiofiles import aiohttp import aiohttp.client_exceptions from aiopath import AsyncPath from sanic.log import logger EXCEPTIONS = ( aiohttp.client_exceptions.ClientConnectionError, aiohttp.client_exceptions.InvalidURL, aiohttp.client_exceptions.TooManyRedirects, AssertionError, asyncio.TimeoutError, UnicodeError, ) async def download(url: str, path: AsyncPath) -> bool: async with aiohttp.ClientSession() as session: try: async with session.get(url, timeout=10) as response: if response.status == 200: f = await aiofiles.open(path, mode="wb") # type: ignore await f.write(await response.read()) await f.close() return True logger.error(f"{response.status} response from {url}") except 
EXCEPTIONS as e: message = str(e).strip("() ") or e.__class__.__name__ logger.error(f"Invalid response from {url}: {message}") return False app/utils/html.py METASEP from .. import settings COLUMNS_STYLE = """ <style> #images { /* Prevent vertical gaps */ line-height: 0; -webkit-column-count: 6; -webkit-column-gap: 0px; -moz-column-count: 6; -moz-column-gap: 0px; column-count: 6; column-gap: 0px; } #images img { /* Just in case there are inline attributes */ width: 100% !important; height: auto !important; } @media (max-width: 1140px) { #images { -moz-column-count: 5; -webkit-column-count: 5; column-count: 5; } } @media (max-width: 960px) { #images { -moz-column-count: 4; -webkit-column-count: 4; column-count: 4; } } @media (max-width: 720px) { #images { -moz-column-count: 3; -webkit-column-count: 3; column-count: 3; } } @media (max-width: 540px) { #images { -moz-column-count: 2; -webkit-column-count: 2; column-count: 2; } } body { margin: 0; padding: 0; } </style> """.strip() REFRESH_SCRIPT = r""" <script> setInterval(function() { var images = document.images; for (var i=0; i<images.length; i++) { images[i].src = images[i].src.replace( /\btime=[^&]*/, 'time=' + new Date().getTime() ); } }, {interval}); </script> """ RESIZE_SCRIPT = """ <script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.2.11/iframeResizer.contentWindow.js" integrity="sha512-RMBWitJB1ymY4l6xeYsFwoEgVCAnOWX/zL1gNwXjlUj78nZ8SVbJsZxbH/w0p2jDNraHkOW8rzQgcJ0LNSXWBA==" crossorigin="anonymous"> </script> """ HTML = """ <!doctype html> <html> <head> {head} </head> <body> {body} </body> </html> """ def gallery( urls: list[str], *, columns: bool, refresh: int, query_string: str = "", ) -> str: extra = "&" + query_string if query_string else "" if columns: if refresh: return _columns_debug(urls, refresh, extra) return _columns(urls) assert refresh return _grid_debug(urls, refresh, extra) def _columns(urls: list[str]) -> str: elements = [] for url in urls: elements.append( f""" <a href="https://memecomplete.com/edit/{url}" target="_parent"> <img src="{url}?width={settings.PREVIEW_SIZE[0]}&frames=10"> </a> """ ) elements.append(RESIZE_SCRIPT) images = "\n".join(elements).replace("\n" + " " * 12, "\n") head = "<title>Memegen.link | examples</title>\n" + COLUMNS_STYLE body = f'<section id="images">\n{images}\n</section>' return HTML.format(head=head, body=body) def _columns_debug(urls: list[str], refresh: int, extra: str) -> str: elements = [] for url in urls: elements.append( f""" <a href="{url}"> <img src="{url}?width={settings.PREVIEW_SIZE[0]}&time=0{extra}"> </a> """ ) if refresh: elements.append(REFRESH_SCRIPT.replace("{interval}", str(refresh * 1000))) images = "\n".join(elements).replace("\n" + " " * 12, "\n") head = "<title>Memegen.link | debug</title>\n" + COLUMNS_STYLE body = f'<section id="images">\n{images}\n</section>' return HTML.format(head=head, body=body) def _grid_debug(urls: list[str], refresh: int, extra: str): elements = [] for url in urls: elements.append( f""" <a href="{url}"> <img src="{url}?time=0{extra}"> </a> """ ) elements.append(REFRESH_SCRIPT.replace("{interval}", str(refresh * 1000))) images = "\n".join(elements).replace("\n" + " " * 12, "\n") head = "<title>Memegen.link | test</title>\n" body = images return HTML.format(head=head, body=body) app/utils/__init__.py METASEP from . 
import html, http, images, meta, text, urls app/tests/test_views_templates.py METASEP import json import pytest def describe_list(): def describe_GET(): @pytest.mark.slow @pytest.mark.parametrize("slash", ["", "/"]) def it_returns_all_templates(expect, client, slash): request, response = client.get("/templates" + slash, timeout=10) expect(response.status) == 200 expect(len(response.json)) >= 140 @pytest.mark.slow def it_can_filter_templates(expect, client): request, response = client.get("/templates?filter=awesome", timeout=10) expect(response.status) == 200 expect(len(response.json)) == 3 def describe_detail(): def describe_GET(): @pytest.mark.parametrize("slash", ["", "/"]) def it_includes_metadata(expect, client, slash): request, response = client.get("/templates/iw" + slash) expect(response.status) == 200 expect(response.json) == { "id": "iw", "name": "Insanity Wolf", "lines": 2, "overlays": 1, "styles": ["default"], "blank": "http://localhost:5000/images/iw.png", "example": { "text": ["does testing", "in production"], "url": "http://localhost:5000/images/iw/does_testing/in_production.png", }, "source": "http://knowyourmeme.com/memes/insanity-wolf", "_self": "http://localhost:5000/templates/iw", } def it_returns_404_when_missing(expect, client): request, response = client.get("/templates/foobar") expect(response.status) == 404 def describe_POST(): @pytest.mark.parametrize("as_json", [True, False]) def it_returns_an_image_url(expect, client, as_json): data = {"text_lines[]": ["foo", "bar"], "extension": "jpg"} kwargs: dict = {"content": json.dumps(data)} if as_json else {"data": data} request, response = client.post("/templates/iw", **kwargs) expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/iw/foo/bar.jpg" } @pytest.mark.parametrize("as_json", [True, False]) def it_supports_custom_backgrounds(expect, client, as_json): data = { "background": "https://www.gstatic.com/webp/gallery/3.png", "text_lines[]": ["foo", "bar"], "extension": "jpg", } kwargs: dict = {"content": json.dumps(data)} if as_json else {"data": data} request, response = client.post("/templates/custom", **kwargs) expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/custom/foo/bar.jpg" "?background=https://www.gstatic.com/webp/gallery/3.png" } @pytest.mark.parametrize("id", ["fry", "custom"]) def it_redirects_if_requested(expect, client, id): data = {"text_lines": ["abc"], "redirect": True} request, response = client.post( f"/templates/{id}", data=data, allow_redirects=False ) redirect = f"http://localhost:5000/images/{id}/abc.png?status=201" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def it_handles_unknown_template_id(expect, client, unknown_template): data = {"text_lines": ["one", "two"]} request, response = client.post( f"/templates/{unknown_template.id}", data=data ) expect(response.status) == 404 expect(response.json) == { "url": "http://localhost:5000/images/unknown/one/two.png" } app/tests/test_views_shortcuts.py METASEP import pytest from .. 
import settings def describe_image_redirects(): @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_redirects_to_normalized_slug(expect, client, extension): request, response = client.get( f"/images/fry/One Two.{extension}", allow_redirects=False ) expect(response.status) == 301 expect(response.headers["Location"]) == f"/images/fry/One_Two.{extension}" @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_preserves_query_params_when_redirecting(expect, client, extension): request, response = client.get( f"/images/custom/One Two.{extension}?alt=http://example.com", allow_redirects=False, ) redirect = f"/images/custom/One_Two.{extension}?alt=http://example.com" expect(response.status) == 301 expect(response.headers["Location"]) == redirect def it_handles_encoded_newlines(expect, client): request, response = client.get("/images/fry/1 2%0A3.jpg", allow_redirects=False) redirect = "/images/fry/1_2~n3.jpg" expect(response.status) == 301 expect(response.headers["Location"]) == redirect def describe_path_redirects(): def it_redirects_to_example_image_when_no_extension(expect, client): request, response = client.get("/images/fry", allow_redirects=False) redirect = "/images/fry/not_sure_if_trolling/or_just_stupid.png" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def it_redirects_to_custom_image_when_text_but_no_extension(expect, client): request, response = client.get("/images/fry/_XD\\XD", allow_redirects=False) expect(response.status) == 302 expect(response.headers["Location"]) == "/images/fry/_XD~bXD.png" def it_returns_gallery_view_when_debug(expect, client, monkeypatch): monkeypatch.setattr(settings, "DEBUG", True) request, response = client.get("/images/fry/test") expect(response.text).contains("/images/fry/test.png") def it_rejects_unknown_templates(expect, client, unknown_template): request, response = client.get( f"/images/{unknown_template.id}", allow_redirects=False ) expect(response.status) == 404 def it_creates_new_templates_when_debug( expect, client, unknown_template, monkeypatch ): monkeypatch.setattr(settings, "DEBUG", True) request, response = client.get( f"/images/{unknown_template.id}", allow_redirects=False ) expect(response.status) == 501 expect(response.text).contains("Template not fully implemented") def it_handles_sample_templates(expect, client, monkeypatch): monkeypatch.setattr(settings, "DEBUG", True) request, response = client.get("/images/<sample>", allow_redirects=False) expect(response.status) == 501 expect(response.text).contains("Replace '&lt;sample>' in the URL") def it_handles_trailing_slashes(expect, client): request, response = client.get("/images/fry/", allow_redirects=False) redirect = "/images/fry" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def describe_legacy_images(): @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_redirects_to_example_image(expect, client, extension): request, response = client.get(f"/fry.{extension}", allow_redirects=False) redirect = f"/images/fry/not_sure_if_trolling/or_just_stupid.{extension}" expect(response.status) == 302 expect(response.headers["Location"]) == redirect @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_redirects_to_custom_image(expect, client, extension): request, response = client.get(f"/fry/test.{extension}", allow_redirects=False) expect(response.status) == 302 expect(response.headers["Location"]) == f"/images/fry/test.{extension}" def describe_legacy_paths(): @pytest.mark.parametrize("suffix", ["", 
".png", ".jpg"]) def it_rejects_unknown_templates(expect, client, unknown_template, suffix): request, response = client.get(f"/{unknown_template.id}{suffix}") expect(response.status) == 404 @pytest.mark.parametrize("suffix", ["", ".png", ".jpg"]) def it_rejects_unknown_templates_with_text( expect, client, unknown_template, suffix ): request, response = client.get(f"/{unknown_template.id}/test{suffix}") expect(response.status) == 404 def describe_legacy_params(): @pytest.mark.slow def it_accepts_alt_for_template(expect, client): request, response = client.get( "/images/custom/test.png?alt=https://www.gstatic.com/webp/gallery/3.jpg" ) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" @pytest.mark.slow def it_accepts_alt_for_style(expect, client): request, response = client.get("/images/sad-biden/test.png?style=scowl") expect(response.status) == 200 request, response2 = client.get("/images/sad-biden/test.png?alt=scowl") expect(response.status) == 200 expect(len(response.content)) == len(response2.content) app/tests/test_views_memes.py METASEP import json from unittest.mock import AsyncMock, patch import pytest from .. import settings def describe_list(): def describe_GET(): @pytest.mark.slow def it_returns_example_image_urls(expect, client): request, response = client.get("/images", timeout=10) expect(response.status) == 200 expect(response.json).contains( { "url": "http://localhost:5000/images/iw/does_testing/in_production.png", "template": "http://localhost:5000/templates/iw", } ) @pytest.mark.slow def it_can_filter_examples(expect, client): request, response = client.get("/images?filter=awesome", timeout=10) expect(response.status) == 200 expect(len(response.json)) == 3 def describe_POST(): @pytest.mark.parametrize("as_json", [True, False]) def it_returns_an_image_url(expect, client, as_json): data = {"template_id": "iw", "text_lines[]": ["foo", "bar"]} kwargs: dict = {"content": json.dumps(data)} if as_json else {"data": data} request, response = client.post("/images", **kwargs) expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/iw/foo/bar.png" } def it_removes_redundant_styles(expect, client): data = { "template_id": "iw", "text_lines[]": ["foo", "bar"], "style[]": [" ", "test", "default"], "font": "impact", } request, response = client.post("/images", data=data) expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/iw/foo/bar.png?style=default,test&font=impact" } def it_returns_gif_when_animated(expect, client): data = { "template_id": "iw", "text_lines[]": ["foo", "bar"], "style": "animated", } request, response = client.post("/images", data=data) expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/iw/foo/bar.gif" } def it_redirects_if_requested(expect, client): data = {"template_id": "iw", "text_lines": ["abc"], "redirect": True} request, response = client.post("/images", data=data, allow_redirects=False) redirect = "http://localhost:5000/images/iw/abc.png?status=201" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def it_requires_template_id(expect, client): data = {"text_lines": ["foo", "bar"]} request, response = client.post("/images", data=data) expect(response.status) == 400 expect(response.json) == {"error": '"template_id" is required'} def it_handles_unknown_template_id(expect, client, unknown_template): data = {"template_id": unknown_template.id, "text_lines": ["one", "two"]} 
request, response = client.post("/images", data=data) expect(response.status) == 404 expect(response.json) == { "url": "http://localhost:5000/images/unknown/one/two.png" } def it_handles_unknown_template_id_redirect(expect, client, unknown_template): data = { "template_id": unknown_template.id, "text_lines": ["one", "two"], "redirect": True, } request, response = client.post("/images", data=data, allow_redirects=False) redirect = "http://localhost:5000/images/unknown/one/two.png?status=201" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def it_handles_missing_text_lines(expect, client): data = {"template_id": "iw"} request, response = client.post("/images", data=data) expect(response.status) == 201 expect(response.json) == {"url": "http://localhost:5000/images/iw.png"} def it_drops_trailing_blank_lines(expect, client): data = {"template_id": "iw", "text_lines": ["", "", "", ""]} request, response = client.post("/images", data=data) expect(response.status) == 201 expect(response.json) == {"url": "http://localhost:5000/images/iw.png"} def it_supports_slashes_to_indicate_blank_lines(expect, client): data = {"template_id": "iw", "text_lines": ["/", "2", "/", ""]} request, response = client.post("/images", data=data) expect(response.status) == 201 expect(response.json) == {"url": "http://localhost:5000/images/iw/_/2.png"} def it_handles_invalid_json(expect, client): request, response = client.post("/images", content="???") expect(response.status) == 400 expect(response.json) == {"error": '"template_id" is required'} def describe_detail(): @pytest.mark.slow @pytest.mark.parametrize( ("path", "content_type"), [ ("/images/fry.gif", "image/gif"), ("/images/fry.jpg", "image/jpeg"), ("/images/fry.png", "image/png"), ("/images/fry/test.gif", "image/gif"), ("/images/fry/test.jpg", "image/jpeg"), ("/images/fry/test.png", "image/png"), ], ) def it_returns_an_image(expect, client, path, content_type): request, response = client.get(path, timeout=10) expect(response.status) == 200 expect(response.headers["content-type"]) == content_type def it_handles_placeholder_templates(expect, client): request, response = client.get("/images/string/test.png") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" def it_handles_unknown_templates(expect, client, unknown_template): request, response = client.get(f"/images/{unknown_template.id}/test.png") expect(response.status) == 404 expect(response.headers["content-type"]) == "image/png" def it_rejects_invalid_extensions(expect, client): request, response = client.get("/images/fry/test.foobar") expect(response.status) == 422 expect(response.headers["content-type"]) == "image/png" def it_rejects_extremely_small_sizes(expect, client): request, response = client.get("/images/fry/test.jpg?width=9") expect(response.status) == 422 expect(response.headers["content-type"]) == "image/jpeg" def it_rejects_invalid_sizes(expect, client): request, response = client.get("/images/fry/test.jpg?width=abc") expect(response.status) == 422 expect(response.headers["content-type"]) == "image/jpeg" def it_rejects_extremely_long_urls(expect, client): text = "test-" * 50 request, response = client.get(f"/images/fry/{text}.jpg") expect(response.status) == 414 expect(response.headers["content-type"]) == "image/jpeg" def describe_font(): def it_rejects_unknown_fonts(expect, client): request, response = client.get("/images/fry/test.png?font=foobar") expect(response.status) == 422 expect(response.headers["content-type"]) == 
"image/png" def it_ignores_placeholder_values(expect, client): request, response = client.get("/images/fry/test.png?font=string") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" def describe_watermark(): @pytest.fixture(autouse=True) def watermark_settings(monkeypatch, client): monkeypatch.setattr(settings, "ALLOWED_WATERMARKS", ["example.com"]) @pytest.fixture def default_content(watermark_settings, client): request, response = client.get("/images/fry/test.png") return response.content def it_returns_a_unique_image(expect, client, default_content): request, response = client.get( "/images/fry/test.png?watermark=example.com", allow_redirects=False, ) expect(response.status) == 200 expect(len(response.content)) != len(default_content) @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_rejects_unknown_watermarks(expect, client, extension): request, response = client.get( f"/images/fry/test.{extension}?watermark=foobar", allow_redirects=False, ) expect(response.status) == 302 expect(response.headers["Location"]) == f"/images/fry/test.{extension}" @pytest.mark.parametrize("extension", ["png", "jpg"]) def it_removes_redundant_watermarks(expect, client, extension): request, response = client.get( f"/images/fry/test.{extension}?watermark=memegen.link", allow_redirects=False, ) expect(response.status) == 302 expect(response.headers["Location"]) == f"/images/fry/test.{extension}" @patch( "app.utils.meta.authenticate", AsyncMock(return_value={"image_access": True}), ) def it_accepts_custom_values_when_authenticated(expect, client): request, response = client.get( "/images/fry/test.png?watermark=mydomain.com", allow_redirects=False, ) expect(response.status) == 200 def it_rejects_invalid_authentication(expect, client): request, response = client.get( "/images/fry/test.png?watermark=blank", headers={"X-API-KEY": "foobar"}, allow_redirects=False, ) expect(response.status) == 302 expect(response.headers["Location"]) == "/images/fry/test.png" def it_is_disabled_automatically_for_small_images(expect, client): small_content = client.get("/images/fry/test.png?width=300")[1].content request, response = client.get( "/images/fry/test.png?width=300&watermark=example.com", allow_redirects=False, ) expect(response.status) == 200 expect(len(response.content)) == len(small_content) def describe_styles(): @pytest.fixture( params=[ "/images/ds/one/two.png?", "/images/custom/test.png?background=https://www.gstatic.com/webp/gallery/3.jpg&", ] ) def base_url(request): return request.param @pytest.mark.slow def it_supports_alternate_styles(expect, client): request, response = client.get("/images/ds/one/two.png?style=maga") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" @pytest.mark.parametrize("slug", ["ds", "ds/one/two"]) def it_redirects_to_gif_when_animated(expect, client, slug): request, response = client.get( f"/images/{slug}.png?style=animated", allow_redirects=False ) redirect = f"/images/{slug}.gif" expect(response.status) == 301 expect(response.headers["Location"]) == redirect @pytest.mark.slow def it_rejects_invalid_styles(expect, client, base_url): request, response = client.get(base_url + "style=foobar") expect(response.status) == 422 expect(response.headers["content-type"]) == "image/png" @pytest.mark.slow def it_ignores_placeholder_values(expect, client, base_url): request, response = client.get(base_url + "style=string") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" def 
describe_overlay(): @pytest.fixture( params=[ "/images/fine/test.png?", "/images/custom/test.png?background=https://www.gstatic.com/webp/gallery/3.jpg&", ] ) def base_url(request): return request.param def it_supports_custom_styles(expect, client, base_url): request, response = client.get( base_url + "style=https://www.gstatic.com/webp/gallery/4.jpg" ) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" @pytest.mark.slow def it_requires_image_urls(expect, client, base_url): request, response = client.get(base_url + "style=http://example.com") expect(response.status) == 415 expect(response.headers["content-type"]) == "image/png" @pytest.mark.slow def it_handles_missing_urls(expect, client, base_url): request, response = client.get( base_url + "style=http://example.com/does_not_exist.png" ) expect(response.status) == 415 expect(response.headers["content-type"]) == "image/png" def describe_custom(): def it_supports_custom_templates(expect, client): request, response = client.get( "/images/custom/test.png" "?background=https://www.gstatic.com/webp/gallery/3.jpg" ) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" def it_requires_an_image_with_custom_templates(expect, client): request, response = client.get("/images/custom/test.png") expect(response.status) == 422 expect(response.headers["content-type"]) == "image/png" def it_handles_invalid_urls(expect, client): request, response = client.get("/images/custom/test.png?background=foobar") expect(response.status) == 415 expect(response.headers["content-type"]) == "image/png" def it_handles_missing_urls(expect, client): request, response = client.get( "/images/custom/test.png" "?background=http://example.com/does_not_exist.png" ) expect(response.status) == 415 expect(response.headers["content-type"]) == "image/png" def it_ignores_placeholder_values(expect, client): request, response = client.get( "/images/custom/string.png?background=string" ) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/png" def describe_automatic(): def describe_POST(): def it_requires_text(expect, client): request, response = client.post("/images/automatic") expect(response.status) == 400 expect(response.json) == {"error": '"text" is required'} @patch( "app.utils.meta.search", AsyncMock( return_value=[ { "image_url": "http://example.com/images/example.png" + "?background=https://www.gstatic.com/webp/gallery/3.png", "confidence": 0.5, } ] ), ) @pytest.mark.parametrize("as_json", [True, False]) def it_normalizes_the_url(expect, client, as_json): data = {"text": "example"} kwargs: dict = {"content": json.dumps(data)} if as_json else {"data": data} request, response = client.post("/images/automatic", **kwargs) expect(response.json) == { "url": "http://localhost:5000/images/example.png" + "?background=https://www.gstatic.com/webp/gallery/3.png", "confidence": 0.5, } def it_handles_invalid_json(expect, client): request, response = client.post("/images/automatic", content="???") expect(response.status) == 400 expect(response.json) == {"error": '"text" is required'} def describe_custom(): def describe_POST(): @pytest.mark.parametrize("as_json", [True, False]) def it_supports_custom_backgrounds(expect, client, as_json): data = { "background": "https://www.gstatic.com/webp/gallery/3.png", "text_lines[]": ["foo", "bar"], "extension": "jpg", } kwargs: dict = {"content": json.dumps(data)} if as_json else {"data": data} request, response = client.post("/images/custom", **kwargs) 
expect(response.status) == 201 expect(response.json) == { "url": "http://localhost:5000/images/custom/foo/bar.jpg" "?background=https://www.gstatic.com/webp/gallery/3.png" } def it_redirects_if_requested(expect, client): data = { "background": "https://www.gstatic.com/webp/gallery/4.png", "text_lines": ["abc"], "redirect": True, } request, response = client.post( "/images/custom", data=data, allow_redirects=False ) redirect = "http://localhost:5000/images/custom/abc.png?background=https://www.gstatic.com/webp/gallery/4.png&status=201" expect(response.status) == 302 expect(response.headers["Location"]) == redirect def describe_GET(): @patch( "app.utils.meta.search", AsyncMock( return_value=[{"image_url": "http://example.com/images/example.png"}] ), ) def it_normalizes_the_url(expect, client): request, response = client.get("/images/custom") expect(response.json) == [ {"url": "http://localhost:5000/images/example.png"} ] @patch( "app.utils.meta.search", AsyncMock( return_value=[ { "image_url": "http://example.com/images/example.png" + "?background=https://www.gstatic.com/webp/gallery/3.png" } ] ), ) def it_normalizes_the_url_with_background(expect, client): request, response = client.get("/images/custom") expect(response.json) == [ { "url": "http://localhost:5000/images/example.png" + "?background=https://www.gstatic.com/webp/gallery/3.png" } ] app/tests/test_views_clients.py METASEP import pytest def describe_auth(): def describe_POST(): def it_returns_401_when_unauthenticated(expect, client): request, response = client.post("/auth") expect(response.status) == 401 expect(response.json) == {"error": "API key missing or invalid."} def describe_fonts(): def describe_GET(): def it_returns_all_fonts(expect, client): request, response = client.get("/fonts") expect(len(response.json)) == 6 def describe_image_preview(): @pytest.fixture def path(): return "/images/preview.jpg" def it_returns_an_image(expect, client, path): request, response = client.get(path) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/jpeg" def it_supports_custom_templates(expect, client, path): request, response = client.get( path + "?template=https://www.gstatic.com/webp/gallery/1.png" ) expect(response.status) == 200 expect(response.headers["content-type"]) == "image/jpeg" def it_handles_invalid_urls(expect, client, path): request, response = client.get(path + "?template=http://example.com/foobar.jpg") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/jpeg" def it_handles_invalid_keys(expect, client, path, unknown_template): request, response = client.get(path + f"?template={unknown_template.id}") expect(response.status) == 200 expect(response.headers["content-type"]) == "image/jpeg" app/tests/test_utils_text.py METASEP import pytest from .. 
import utils LINES_SLUG = [ (["hello world"], "hello_world"), (["?%#/&\\<>"], "~q~p~h~s~a~b~l~g"), (["a/b", "c"], "a~sb/c"), (["variable_name"], "variable__name"), (["variable-name"], "variable--name"), (["foo\nbar"], "foo~nbar"), (["def data() -> Dict"], "def_data()_--~g_Dict"), (["finish <- start"], "finish_~l--_start"), (['That\'s not how "this" works'], "That's_not_how_''this''_works"), (["git commit --no-verify"], "git_commit_----no--verify"), ] @pytest.mark.parametrize(("lines", "slug"), LINES_SLUG) def test_encode(expect, lines, slug): expect(utils.text.encode(lines)) == slug @pytest.mark.parametrize(("lines", "slug"), LINES_SLUG) def test_decode(expect, lines, slug): expect(utils.text.decode(slug)) == lines def test_decode_dashes(expect): expect(utils.text.decode("hello-world")) == ["hello world"] def test_encode_quotes(expect): expect( utils.text.encode(["it’ll be great “they” said"]) ) == 'it\'ll_be_great_"they"_said' def test_encode_dashes(expect): expect(utils.text.encode(["1–2 in. of snow"])) == "1-2_in._of_snow" app/tests/test_utils_meta.py METASEP import pytest from .. import settings, utils def describe_tokenize(): @pytest.mark.asyncio async def it_restricts_sample_api_key_usage(expect, request): request.args = {"api_key": "myapikey42"} request.headers = {} url, updated = await utils.meta.tokenize( request, "http://api.memegen.link/images/fry/test.png?api_key=myapikey42" ) expect(url) == "http://api.memegen.link/images/fry/test.png" expect(updated) == True def describe_track(): @pytest.mark.asyncio async def it_is_disabled_automatically_after_error(expect, monkeypatch, request): monkeypatch.setattr(settings, "REMOTE_TRACKING_URL", "http://example.com/404") monkeypatch.setattr(settings, "REMOTE_TRACKING_ERRORS_LIMIT", 1) request.args = {} request.headers = {} request.host = "example.com" request.url = "http://example.com" await utils.meta.track(request, ["foo"]) await utils.meta.track(request, ["bar"]) expect(settings.TRACK_REQUESTS) == False app/tests/test_utils_images.py METASEP import os import shutil import time from pathlib import Path import pytest from .. 
import models, settings, utils @pytest.fixture(scope="session") def images(): path = settings.TEST_IMAGES_DIRECTORY flag = path / ".flag" if flag.exists(): age = time.time() - flag.stat().st_mtime if age > 60 * 60 * 6 and "SKIP_SLOW" not in os.environ: shutil.rmtree(path) path.mkdir(exist_ok=True) flag.touch() return path @pytest.fixture(scope="session") def template(): return models.Template.objects.get("icanhas") # Formats @pytest.mark.slow @pytest.mark.parametrize(("id", "lines", "extension"), settings.TEST_IMAGES) def test_images(images, id, lines, extension): template = models.Template.objects.get(id) utils.images.save(template, lines, extension=extension, directory=images) # Size def test_smaller_width(images, template): utils.images.save(template, ["width=250"], size=(250, 0), directory=images) def test_smaller_height(images, template): utils.images.save(template, ["height=250"], size=(0, 250), directory=images) def test_larger_width(images, template): utils.images.save(template, ["width=500"], size=(500, 0), directory=images) def test_larger_height(images, template): utils.images.save(template, ["height=500"], size=(0, 500), directory=images) def test_wide_padding(images, template): lines = ["width=600", "height=400"] utils.images.save(template, lines, size=(600, 400), directory=images) def test_tall_padding(images, template): lines = ["width=400", "height=600"] utils.images.save(template, lines, size=(400, 600), directory=images) def test_small_padding(images, template): lines = ["width=50", "height=50"] utils.images.save(template, lines, size=(50, 50), directory=images) @pytest.mark.slow def test_large_padding(images, template): lines = ["width=2000", "height=2000"] utils.images.save(template, lines, size=(2000, 2000), directory=images) # Templates @pytest.mark.asyncio async def test_custom_template(images): url = "https://www.gstatic.com/webp/gallery/2.jpg" template = await models.Template.create(url) utils.images.save(template, ["", "My Custom Template"], directory=images) @pytest.mark.slow @pytest.mark.asyncio async def test_custom_template_with_exif_rotation(images): url = "https://cdn.discordapp.com/attachments/752902976322142218/752903391281283152/20200608_111430.jpg" template = await models.Template.create(url) utils.images.save(template, ["", "This should not be rotated!"], directory=images) def test_unknown_template(images): template = models.Template.objects.get("_error") utils.images.save(template, ["UNKNOWN TEMPLATE"], directory=images) # Styles @pytest.mark.slow def test_alternate_style(images): template = models.Template.objects.get("ds") lines = ["one", "two", "three"] utils.images.save(template, lines, style="maga", directory=images) @pytest.mark.slow @pytest.mark.asyncio async def test_custom_style(images): url = "https://sn56.scholastic.com/content/dam/classroom-magazines/sn56/issues/2019-20/031620/coronavirus/16-SN56-20200316-VirusOutbreak-PO-2.png" template = models.Template.objects.get("fine") await template.check(url, force=True) lines = ["102 °F", "this is fine"] utils.images.save(template, lines, style=url, directory=images) @pytest.mark.slow @pytest.mark.asyncio async def test_custom_style_rotated(images): style = "https://i.imgur.com/6hwAxmO.jpg,https://i.imgur.com/6hwAxmO.jpg" template = models.Template.objects.get("same") await template.check(style, force=True) utils.images.save(template, [], style=style, directory=images) # Text def test_special_characters(images, template): lines = ["Special? 
100% #these-memes", "template_rating: 9/10"] utils.images.save(template, lines, directory=images) @pytest.mark.skipif("CIRCLECI" in os.environ, reason="Long filenames not supported") def test_extremely_long_text(images, tmpdir): template = models.Template.objects.get("fry") lines = ["", "word " * 40] utils.images.save(template, lines, directory=Path(tmpdir) / "images") def test_long_first_word(images): template = models.Template.objects.get("fine") lines = ["", "thiiiiiiiiiiiiiiiiiiiiis will probably be fine right now"] utils.images.save(template, lines, directory=images) @pytest.mark.slow def test_text_wrap_when_font_is_too_small(images): template = models.Template.objects.get("ds") lines = ["this button seems to be ok to push"] utils.images.save(template, lines, directory=images) def test_text_wrap_on_small_images(images): template = models.Template.objects.get("pigeon") lines = ["", "multiple words here"] utils.images.save(template, lines, size=(0, 300), directory=images) def test_text_wrap_on_smaller_images(images): template = models.Template.objects.get("toohigh") lines = ["", "the number of sample memes is too damn high!"] utils.images.save(template, lines, size=(0, 200), directory=images) @pytest.mark.slow def test_descender_vertical_alignment(images): template = models.Template.objects.get("ptj") lines = [ "Exit", "Exit", "the", "the", "monorepo", "monorepo", "Exit the monorepo.", "Stop testing!", ] utils.images.save(template, lines, directory=images) # Fonts def test_font_override(images, template): lines = ["custom", "font"] utils.images.save(template, lines, font_name="comic", directory=images) # Watermark def test_watermark(images, template): lines = ["nominal image", "with watermark"] utils.images.save(template, lines, "Example.com", directory=images) def test_watermark_with_padding(images, template): lines = ["paddded image", "with watermark"] utils.images.save(template, lines, "Example.com", size=(500, 500), directory=images) def test_watermark_disabled_when_small(images, template): lines = ["small image", "with watermark (disabled)"] utils.images.save(template, lines, "Example.com", size=(300, 0), directory=images) @pytest.mark.slow def test_watermark_with_many_lines(images): template = models.Template.objects.get("ptj") lines = ["", "", "", "", "", "", "Has a watermark.", "Doesn't have a watermark!"] utils.images.save(template, lines, "Example.com", directory=images) # Debug @pytest.mark.parametrize(("extension"), ["png", "gif"]) def test_debug_images(images, monkeypatch, extension): monkeypatch.setattr(settings, "DEBUG", True) id, lines, _extension = settings.TEST_IMAGES[0] template = models.Template.objects.get(id) lines = [lines[0], lines[1] + " (debug)"] utils.images.save( template, lines, directory=images, extension=extension, maximum_frames=5 ) def test_deployed_images(images, monkeypatch): monkeypatch.setattr(settings, "DEPLOYED", True) id, lines, _extension = settings.TEST_IMAGES[0] template = models.Template.objects.get(id) utils.images.save(template, lines, directory=images) monkeypatch.delattr(utils.images, "render_image") utils.images.save(template, lines, directory=images) def test_preview_images(images, template): path = images / "preview.jpg" data, _extension = utils.images.preview(template, ["nominal image", "while typing"]) path.write_bytes(data) app/tests/test_models_text.py METASEP import pytest from ..models import Text def describe_text(): def describe_stylize(): @pytest.mark.parametrize( ("style", "before", "after"), [ ("none", "Hello, world!", 
"Hello, world!"), ("default", "these are words.", "These are words."), ("default", "These ARE words.", "These ARE words."), ("upper", "Hello, world!", "HELLO, WORLD!"), ("lower", "Hello, world!", "hello, world!"), ("title", "these are words", "These Are Words"), ("capitalize", "these are words", "These are words"), ("mock", "these are words", "ThEsE aRe WorDs"), ("<unknown>", "Hello, world!", "Hello, world!"), ], ) def it_applies_style(expect, style, before, after): text = Text() text.style = style expect(text.stylize(before)) == after def it_defaults_to_upper(expect): text = Text() text.style = "" expect(text.stylize("Foobar")) == "FOOBAR" def it_respects_case_when_set_in_any_line(expect): text = Text(style="default") expect(text.stylize("foo", lines=["foo", " ", "bar"])) == "Foo" expect(text.stylize("foo", lines=["foo", " ", "Bar"])) == "foo" app/tests/test_models_template.py METASEP from pathlib import Path import datafiles import log import pytest from ..models import Overlay, Template, Text def describe_template(): @pytest.fixture def template(): t = Template.objects.get("_test") t.clean() yield t t.clean() def describe_str(): def it_includes_the_path(expect, template): expect(str(template)).endswith("/memegen/templates/_test") def describe_valid(): def it_only_includes_default_style_with_custom_overlay( expect, template, monkeypatch ): monkeypatch.setattr(datafiles.settings, "HOOKS_ENABLED", False) template.overlay = [Overlay()] expect(template.styles) == [] del template.styles template.overlay[0].center_x = 0.123 expect(template.styles) == ["default"] def describe_text(): def it_defaults_to_two_lines(expect, template): expect(template.text) == [Text(), Text(anchor_x=0.0, anchor_y=0.8)] def describe_image(): def it_has_generic_extension_when_absent(expect, template): expect(template.image) == Path.cwd() / "templates" / "_test" / "default.img" def it_creates_template_directory_automatically(expect): template = Template.objects.get_or_create("_custom-empty") template.datafile.path.unlink(missing_ok=True) template.datafile.path.parent.rmdir() log.info(template.image) expect(template.datafile.path.parent.exists()) == True def describe_create(): @pytest.mark.asyncio async def it_downloads_the_image(expect): url = "https://www.gstatic.com/webp/gallery/1.jpg" path = ( Path.cwd() / "templates" / "_custom-2d3c91e23b91d6387050e85efc1f3acb39b5a95d" / "default.jpg" ) template = await Template.create(url, force=True) expect(template.image) == path expect(template.image.exists()) == True @pytest.mark.asyncio async def it_handles_missing_urls(expect): url = "http://example.com/does_not_exist.png" template = await Template.create(url) expect(template.image.exists()) == False @pytest.mark.asyncio async def it_handles_unreachable_urls(expect): url = "http://127.0.0.1/does_not_exist.png" template = await Template.create(url) expect(template.image.exists()) == False @pytest.mark.asyncio @pytest.mark.parametrize( "url", [ "httpshttps://cdn.pixabay.com/photo/2015/09/09/19/41/cat-932846_1280.jpg", "https://https://i.imgur.com/bf995.gif&width=400", ], ) async def it_handles_invalid_urls(expect, url): template = await Template.create(url) expect(template.valid) == False @pytest.mark.asyncio async def it_rejects_non_images(expect): url = "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4" template = await Template.create(url) expect(template.image.exists()) == False @pytest.mark.asyncio @pytest.mark.parametrize("subdomain", ["api", "staging"]) async def 
it_handles_builtin_templates(expect, subdomain): url = f"http://{subdomain}.memegen.link/images/fry.png" template = await Template.create(url) expect(template.id) == "fry" @pytest.mark.asyncio async def it_handles_invalid_builtin_templates(expect): url = "http://api.memegen.link/images/fry2.png" template = await Template.create(url) expect(template.id) == "_error" @pytest.mark.asyncio async def it_handles_custom_templates(expect): url = "http://api.memegen.link/images/custom.png?background=https://www.gstatic.com/webp/gallery/1.jpg" template = await Template.create(url) expect(template.id) == "_custom-2d3c91e23b91d6387050e85efc1f3acb39b5a95d" @pytest.mark.asyncio async def it_handles_custom_templates_lacking_background(expect): url = "http://api.memegen.link/images/custom.png?background" template = await Template.create(url) expect(template.id) == "_error" @pytest.mark.asyncio async def it_handles_custom_templates_with_invalid_background(expect): url = "http://api.memegen.link/images/custom.png?background=https://https://example.com" template = await Template.create(url) expect(template.id) == "_error" @pytest.mark.asyncio async def it_handles_meme_urls(expect): url = "http://api.memegen.link/images/fry/test.png" template = await Template.create(url) expect(template.id) == "fry" def describe_check(): @pytest.mark.asyncio async def it_determines_overlay_file_extension(expect): url = "https://i.guim.co.uk/img/media/8a13052d4db7dcd508af948e5db7b04598e03190/0_294_5616_3370/master/5616.jpg?width=1200&height=1200&quality=85&auto=format&fit=crop&s=bcaa4eed2c1e6dab61c41a61e41433d9" template = Template.objects.get("fine") expect(await template.check(url, force=True)) == True @pytest.mark.asyncio async def it_assumes_extension_when_unknown(expect): url = "https://camo.githubusercontent.com/ce9c7a173f38722e129d5ae832a11c928ff72683fae74cbcb9fff41fd9957e63/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f7468756d622f332f33662f4769745f69636f6e2e7376672f3130323470782d4769745f69636f6e2e7376672e706e67" template = Template.objects.get("fine") expect(await template.check(url, force=True)) == True @pytest.mark.asyncio async def it_accepts_multiple_urls(expect): style = ",".join( [ "https://user-images.githubusercontent.com/674621/71187801-14e60a80-2280-11ea-94c9-e56576f76baf.png", "https://i.stack.imgur.com/PvgbL.png", "https://www.nicepng.com/png/detail/13-139107_notepad-icon-icon-anotacoes-png.png", ] ) template = Template.objects.get("perfection") expect(await template.check(style)) == True @pytest.mark.asyncio async def it_accepts_default_style_as_placeholder(expect): style = "default,https://i.stack.imgur.com/PvgbL.png" template = Template.objects.get("perfection") expect(await template.check(style)) == True app/tests/test_main.py METASEP import pytest from .. 
import settings def describe_index(): def it_redirects_to_the_docs(expect, client): request, response = client.get("/") expect(response.status) == 200 expect(response.text).contains("openapi.json") def it_contains_favicon(expect, client): request, response = client.get("/favicon.ico") expect(response.status) == 200 def it_contains_robots(expect, client): request, response = client.get("/robots.txt") expect(response.status) == 200 expect(response.text).contains("Allow: /\n") def describe_examples(): @pytest.mark.slow def it_displays_images(expect, client): request, response = client.get("/examples", timeout=10) expect(response.status) == 200 expect(response.text.count("img")) > 100 expect(response.text).excludes("setInterval") @pytest.mark.slow def it_can_enable_automatic_refresh(expect, client, monkeypatch): monkeypatch.setattr(settings, "DEBUG", True) request, response = client.get("/examples?debug=true", timeout=10) expect(response.status) == 200 expect(response.text.count("img")) > 100 expect(response.text).includes("setInterval") def describe_test(): def it_redirects_to_the_index(expect, client): request, response = client.get("/test", allow_redirects=False) expect(response.status) == 302 expect(response.headers["Location"]) == "/" def it_displays_test_images_when_debug(expect, client, monkeypatch): monkeypatch.setattr(settings, "DEBUG", True) request, response = client.get("/test", allow_redirects=False) expect(response.status) == 200 expect(response.text.count("img")) > 5 expect(response.text.count("img")) < 100 app/tests/test_docs.py METASEP from pkg_resources import get_distribution def describe_spec(): def it_contains_the_version(expect, client): version = get_distribution("memegen").version request, response = client.get("/docs/openapi.json") expect(response.status) == 200 expect(response.json["info"]["version"]) == version app/tests/conftest.py METASEP import os import pytest from .. import settings from ..main import app from ..models import Template def pytest_configure(config): terminal = config.pluginmanager.getplugin("terminal") terminal.TerminalReporter.showfspath = False def pytest_runtest_setup(item): for marker in item.iter_markers(name="slow"): if "SKIP_SLOW" in os.environ: pytest.skip("slow test") @pytest.fixture def client(monkeypatch): monkeypatch.setattr(settings, "REMOTE_TRACKING_URL", None) return app.test_client @pytest.fixture def unknown_template(): template = Template.objects.get_or_create("unknown") template.delete() yield template template.delete() app/tests/__init__.py METASEP app/models/text.py METASEP from dataclasses import dataclass from sanic.log import logger from spongemock import spongemock from .. 
import settings from ..types import Dimensions, Point @dataclass class Text: style: str = "upper" color: str = "white" font: str = settings.DEFAULT_FONT anchor_x: float = 0.0 anchor_y: float = 0.0 angle: float = 0 scale_x: float = 1.0 scale_y: float = 0.2 start: float = 0.0 stop: float = 1.0 @classmethod def get_preview(cls) -> "Text": return cls( color="#80808060", anchor_x=0.075, anchor_y=0.05, angle=10, scale_x=0.75, scale_y=0.75, ) @classmethod def get_error(cls) -> "Text": return cls(color="yellow", anchor_x=0.5) @classmethod def get_watermark(cls) -> "Text": return cls(color="#FFFFFF85") def get_anchor(self, image_size: Dimensions, watermark: str = "") -> Point: image_width, image_height = image_size anchor = int(image_width * self.anchor_x), int(image_height * self.anchor_y) if watermark and self.anchor_x <= 0.1 and self.anchor_y >= 0.8: anchor = anchor[0], anchor[1] - settings.WATERMARK_HEIGHT // 2 return anchor def get_size(self, image_size: Dimensions) -> Dimensions: image_width, image_height = image_size size = int(image_width * self.scale_x), int(image_height * self.scale_y) return size def get_stroke(self, width: int) -> tuple[int, str]: if self.color == "black": width = 1 color = "#FFFFFF85" elif "#" in self.color: width = 1 color = "#000000" + self.color[-2:] else: color = "black" return width, color def stylize(self, text: str, **kwargs) -> str: lines = [line for line in kwargs.get("lines", [text]) if line.strip()] if self.style == "none": return text if self.style == "default": text = text.capitalize() if all(line.islower() for line in lines) else text return text if self.style == "mock": return spongemock.mock(text, diversity_bias=0.75, random_seed=0) method = getattr(text, self.style or self.__class__.style, None) if method: return method() logger.warning(f"Unsupported text style: {self.style}") return text app/models/template.py METASEP import asyncio import shutil from functools import cached_property from pathlib import Path import aiopath from datafiles import datafile, field from furl import furl from sanic import Request from sanic.log import logger from .. 
import settings, utils from ..types import Dimensions from .overlay import Overlay from .text import Text @datafile("../../templates/{self.id}/config.yml", defaults=True) class Template: id: str name: str = "" source: str | None = None text: list[Text] = field( default_factory=lambda: [Text(), Text(anchor_x=0.0, anchor_y=0.8)] ) example: list[str] = field(default_factory=lambda: ["Top Line", "Bottom Line"]) overlay: list[Overlay] = field(default_factory=lambda: [Overlay()]) def __str__(self): return str(self.directory) def __lt__(self, other): return self.id < other.id @cached_property def valid(self) -> bool: if not settings.DEPLOYED: self._update_example() self.datafile.save() return ( not self.id.startswith("_") and self.image.suffix != settings.PLACEHOLDER_SUFFIX ) def _update_example(self): for line in self.example: if line and not line.isupper(): return self.example = [line.lower() for line in self.example] @cached_property def styles(self): styles = [] for path in self.directory.iterdir(): if not path.stem[0] in {".", "_"} and path.stem not in { "config", settings.DEFAULT_STYLE, }: styles.append(path.stem) if styles or self.overlay != [Overlay()]: styles.append("default") styles.sort() return styles @cached_property def directory(self) -> Path: return self.datafile.path.parent @cached_property def image(self) -> Path: return self.get_image() def get_image(self, style: str = "") -> Path: style = style or settings.DEFAULT_STYLE if utils.urls.schema(style): url = style style = utils.text.fingerprint(url) self.directory.mkdir(exist_ok=True) for path in self.directory.iterdir(): if path.stem == style and path.suffix != settings.PLACEHOLDER_SUFFIX: return path if style == settings.DEFAULT_STYLE: logger.debug(f"No default background image for template: {self.id}") return self.directory / ( settings.DEFAULT_STYLE + settings.PLACEHOLDER_SUFFIX ) logger.warning(f"Style {style!r} not available for template: {self.id}") return self.get_image() def jsonify(self, request: Request) -> dict: return { "id": self.id, "name": self.name, "lines": len(self.text), "overlays": len(self.overlay) if self.styles else 0, "styles": self.styles, "blank": request.app.url_for( "Memes.blank", template_id=self.id + "." + settings.DEFAULT_EXTENSION, _external=True, _scheme=settings.SCHEME, ), "example": { "text": self.example if any(self.example) else [], "url": self.build_example_url(request), }, "source": self.source, "_self": self.build_self_url(request), } def build_self_url(self, request: Request) -> str: return request.app.url_for( "Templates.detail", id=self.id, _external=True, _scheme=settings.SCHEME, ) def build_example_url( self, request: Request, *, extension: str = settings.DEFAULT_EXTENSION, external: bool = True, ) -> str: kwargs = { "template_id": self.id, "text_paths": utils.text.encode(self.example) + "." + extension, "_external": external, } if external: kwargs["_scheme"] = settings.SCHEME url = request.app.url_for("Memes.text", **kwargs) return utils.urls.clean(url) def build_custom_url( self, request: Request, text_lines: list[str], *, extension: str = settings.DEFAULT_EXTENSION, background: str = "", style: str = "", font: str = "", ): if extension not in settings.ALLOWED_EXTENSIONS: extension = settings.DEFAULT_EXTENSION if style == settings.DEFAULT_STYLE: style = "" url = request.app.url_for( "Memes.text", template_id="custom" if self.id == "_custom" else self.id, text_paths=utils.text.encode(text_lines) + "." 
+ extension, _external=True, _scheme=settings.SCHEME, **utils.urls.params(background=background, style=style, font=font), ) return utils.urls.clean(url) def build_path( self, text_lines: list[str], font_name: str, style: str, size: Dimensions, watermark: str, extension: str, frames: int = 0, ) -> Path: slug = utils.text.encode(text_lines) variant = str(self.text) + font_name + style + str(size) + watermark if frames: variant += str(frames) fingerprint = utils.text.fingerprint(variant, prefix="") filename = f"{slug}.{fingerprint}.{extension}" return Path(self.id) / filename @classmethod async def create(cls, url: str, *, force=False) -> "Template": try: parsed = furl(url) except ValueError as e: logger.error(e) return cls.objects.get("_error") if parsed.netloc and "memegen.link" in parsed.netloc: logger.info(f"Handling template URL: {url}") if len(parsed.path.segments) > 1: id = Path(parsed.path.segments[1]).stem if id != "custom": return cls.objects.get_or_none(id) or cls.objects.get("_error") background = parsed.args.get("background") if not background: return cls.objects.get("_error") url = background try: parsed = furl(url) except ValueError as e: logger.error(e) return cls.objects.get("_error") id = utils.text.fingerprint(url) template = cls.objects.get_or_create(id, url) suffix = Path(str(parsed.path)).suffix if not suffix or len(suffix) > 10: logger.warning(f"Unable to determine image extension: {url}") suffix = settings.PLACEHOLDER_SUFFIX filename = "default" + suffix path = aiopath.AsyncPath(template.directory) / filename if await path.exists() and not settings.DEBUG and not force: logger.info(f"Found background {url} at {path}") return template logger.info(f"Saving background {url} to {path}") if not await utils.http.download(url, path): return template try: await asyncio.to_thread(utils.images.load, Path(path)) except utils.images.EXCEPTIONS as e: logger.error(e) await path.unlink(missing_ok=True) return template async def check(self, style: str, *, force=False) -> bool: if style in {"", None, settings.DEFAULT_STYLE}: return True if style in self.styles: return True if not utils.urls.schema(style): logger.error(f"Invalid style for {self.id} template: {style}") return False filename = utils.text.fingerprint(style, suffix=self.image.suffix) path = aiopath.AsyncPath(self.directory) / filename if await path.exists() and not settings.DEBUG and not force: logger.info(f"Found overlay {style} at {path}") return True urls = style.split(",") logger.info(f"Embeding {len(urls)} overlay image(s) onto {path}") await asyncio.to_thread(shutil.copy, self.image, path) embedded = 0 for index, url in enumerate(urls): success = await self._embed(index, url, path, force) if success: embedded += 1 if len(urls) == 1 and not embedded: await path.unlink() return embedded == len(urls) async def _embed( self, index: int, url: str, background: aiopath.AsyncPath, force: bool ) -> bool: if url.strip() in {"", settings.DEFAULT_STYLE}: return True suffix = Path(str(furl(url).path)).suffix if not suffix: logger.warning(f"Unable to determine image extension: {url}") suffix = ".png" filename = utils.text.fingerprint(url, prefix="_embed-", suffix=suffix) foreground = aiopath.AsyncPath(self.directory) / filename if await foreground.exists() and not settings.DEBUG and not force: logger.info(f"Found overlay {url} at {foreground}") else: logger.info(f"Saving overlay {url} to {foreground}") await utils.http.download(url, foreground) try: await asyncio.to_thread( utils.images.embed, self, index, Path(foreground), 
Path(background) ) except utils.images.EXCEPTIONS as e: logger.error(e) await foreground.unlink(missing_ok=True) return await foreground.exists() def clean(self): for path in self.directory.iterdir(): if path.stem not in {"config", "default"}: path.unlink() def delete(self): if self.directory.exists(): shutil.rmtree(self.directory) def matches(self, query: str) -> bool: example = " ".join(line.lower() for line in self.example) return any((query in self.id, query in self.name.lower(), query in example)) app/models/overlay.py METASEP from dataclasses import dataclass from ..types import Box, Dimensions @dataclass class Overlay: center_x: float = 0.5 center_y: float = 0.5 angle: float = 0.0 scale: float = 0.25 def get_size(self, background_size: Dimensions) -> Dimensions: background_width, background_height = background_size dimension = min( int(background_width * self.scale), int(background_height * self.scale), ) return dimension, dimension def get_box( self, background_size: Dimensions, foreground_size: Dimensions | None = None ) -> Box: background_width, background_height = background_size if foreground_size is None: foreground_size = self.get_size(background_size) foreground_width, foreground_height = foreground_size box = ( int(background_width * self.center_x - foreground_width / 2), int(background_height * self.center_y - foreground_height / 2), int(background_width * self.center_x + foreground_width / 2), int(background_height * self.center_y + foreground_height / 2), ) return box app/models/font.py METASEP from __future__ import annotations from dataclasses import KW_ONLY, asdict, dataclass from pathlib import Path from .. import settings class Manager: @staticmethod def get(name: str) -> Font: name = name or settings.DEFAULT_FONT for font in FONTS: if name in (font.id, font.alias): return font raise ValueError(f"Unknown font: {name}") @staticmethod def all() -> list[Font]: return FONTS @dataclass class Font: filename: str id: str _: KW_ONLY alias: str = "" objects = Manager() @property def path(self) -> Path: return settings.ROOT / "fonts" / self.filename @property def data(self) -> dict: return asdict(self) FONTS = [ Font("TitilliumWeb-Black.ttf", "titilliumweb", alias="thick"), Font("NotoSans-Bold.ttf", "notosans"), Font("Kalam-Regular.ttf", "kalam", alias="comic"), Font("Impact.ttf", "impact"), Font("TitilliumWeb-SemiBold.ttf", "titilliumweb-thin", alias="thin"), Font("Segoe UI Bold.ttf", "segoe", alias="tiny"), ] app/models/__init__.py METASEP from .font import Font from .overlay import Overlay from .template import Template from .text import Text app/types.py METASEP Box = tuple[int, int, int, int] Dimensions = tuple[int, int] Point = tuple[int, int] Offset = tuple[int, int] app/settings.py METASEP import os from pathlib import Path ROOT = Path(__file__).parent.parent.resolve() # Server configuration PORT = int(os.environ.get("PORT", 5000)) WORKERS = int(os.environ.get("WEB_CONCURRENCY", 1)) DEBUG = bool(os.environ.get("DEBUG", False)) if "DOMAIN" in os.environ: # staging / production SERVER_NAME = os.environ["DOMAIN"] RELEASE_STAGE = "staging" if "staging" in SERVER_NAME else "production" SCHEME = "https" elif "HEROKU_APP_NAME" in os.environ: # review apps SERVER_NAME = os.environ["HEROKU_APP_NAME"] + ".herokuapp.com" RELEASE_STAGE = "review" SCHEME = "https" else: # localhost SERVER_NAME = f"localhost:{PORT}" RELEASE_STAGE = "local" SCHEME = "http" BASE_URL = f"{SCHEME}://{SERVER_NAME}" DEPLOYED = RELEASE_STAGE != "local" and not DEBUG # API SUFFIX = " [DEBUG ONLY]" if not 
DEPLOYED else "" PLACEHOLDER = "string" # Swagger UI placeholder value # Fonts DEFAULT_FONT = "thick" MINIMUM_FONT_SIZE = 7 # Image rendering IMAGES_DIRECTORY = ROOT / "images" DEFAULT_STYLE = "default" DEFAULT_EXTENSION = "png" ALLOWED_EXTENSIONS = [DEFAULT_EXTENSION, "jpg", "jpeg", "gif", "webp"] PLACEHOLDER_SUFFIX = ".img" PREVIEW_SIZE = (300, 300) DEFAULT_SIZE = (600, 600) MAXIMUM_PIXELS = 1920 * 1080 MAXIMUM_FRAMES = 20 # Watermarks DISABLED_WATERMARK = "none" DEFAULT_WATERMARK = "Memegen.link" ALLOWED_WATERMARKS = [DEFAULT_WATERMARK] WATERMARK_HEIGHT = 15 PREVIEW_TEXT = "PREVIEW" # Test images TEST_IMAGES_DIRECTORY = ROOT / "app" / "tests" / "images" TEST_IMAGES = [ ( "iw", ["tests code", "in production"], "jpg", ), ( "fry", ["a", "b"], "png", ), ( "fry", ["short line", "longer line of text than the short one"], "png", ), ( "fry", ["longer line of text than the short one", "short line"], "png", ), ( "sparta", ["", "this is a wide image!"], "png", ), ( "ski", [ "if you try to put a bunch more text than can possibly fit on a meme", "you're gonna have a bad time", ], "png", ), ( "ds", ["Push this button.", "Push that button.", "can't decide which is worse"], "png", ), ( "spongebob", ["You: Stop talking like that", "Me: Stop talking like that"], "png", ), ( "mouth", ["Sales Team presenting solution that won't work", "Excited Customer", "Me"], "png", ), ( "cmm", ["Many\nextra\nlines\nof\ntext"], "png", ), ( "oprah", ["you get animated text", "and you get animated text"], "gif", ), ] # Analytics TRACK_REQUESTS = True REMOTE_TRACKING_URL = os.getenv("REMOTE_TRACKING_URL") REMOTE_TRACKING_ERRORS = 0 REMOTE_TRACKING_ERRORS_LIMIT = int(os.getenv("REMOTE_TRACKING_ERRORS_LIMIT", "10")) BUGSNAG_API_KEY = os.getenv("BUGSNAG_API_KEY") app/main.py METASEP import asyncio import random import log from sanic import Sanic, response from sanic_ext import openapi from app import config, helpers, settings, utils app = Sanic(name="memegen") config.init(app) @app.get("/") @openapi.exclude(True) def index(request): return response.redirect("/docs") @app.get("/examples") @openapi.exclude(True) async def examples(request): animated = utils.urls.flag(request, "animated") items = await asyncio.to_thread(helpers.get_example_images, request, "", animated) urls = [items[0] for items in items] if settings.DEBUG: refresh = int(request.args.get("refresh", 5 * 60)) else: refresh = 0 random.shuffle(urls) content = utils.html.gallery(urls, columns=True, refresh=refresh) return response.html(content) @app.get("/test") @openapi.exclude(True) async def test(request): if not settings.DEBUG: return response.redirect("/") urls = await asyncio.to_thread(helpers.get_test_images, request) content = utils.html.gallery(urls, columns=False, refresh=20) return response.html(content) @app.get("/favicon.ico") @openapi.exclude(True) async def favicon(request): return await response.file("app/static/favicon.ico") @app.get("/robots.txt") @openapi.exclude(True) async def robots(request): return await response.file("app/static/robots.txt") if __name__ == "__main__": log.reset() log.silence("datafiles", allow_warning=True) app.run( host="0.0.0.0", port=settings.PORT, workers=settings.WORKERS, debug=settings.DEBUG, access_log=False, motd=False, ) app/helpers.py METASEP from sanic import Request from . 
import settings, utils from .models import Template def get_valid_templates( request: Request, query: str = "", animated: bool | None = None ) -> list[dict]: templates = Template.objects.filter(valid=True, _exclude="_custom") if query: templates = [t for t in templates if t.matches(query)] else: templates = sorted(templates) if animated is True: templates = [t for t in templates if "animated" in t.styles] elif animated is False: templates = [t for t in templates if "animated" not in t.styles] return [template.jsonify(request) for template in templates] def get_example_images( request: Request, query: str = "", animated: bool | None = None ) -> list[tuple[str, str]]: templates = Template.objects.filter(valid=True, _exclude="_custom") if query: templates = [t for t in templates if t.matches(query)] else: templates = sorted(templates) images = [] for template in templates: if animated is True and "animated" not in template.styles: continue if "animated" in template.styles and animated is not False: extension = "gif" else: extension = settings.DEFAULT_EXTENSION example = template.build_example_url(request, extension=extension) self = template.build_self_url(request) images.append((example, self)) return images def get_test_images(request: Request) -> list[str]: return [ request.app.url_for( "Memes.text", template_id=id, text_paths=utils.text.encode(lines) + "." + extension, ) for id, lines, extension in settings.TEST_IMAGES ] app/config.py METASEP import bugsnag from aiohttp.client_exceptions import ClientPayloadError from PIL import UnidentifiedImageError from sanic.exceptions import MethodNotSupported, NotFound from sanic.handlers import ErrorHandler from . import settings, utils, views IGNORED_EXCEPTIONS = ( ClientPayloadError, MethodNotSupported, NotFound, UnidentifiedImageError, ) class BugsnagErrorHandler(ErrorHandler): def default(self, request, exception): if self._should_notify(exception): bugsnag.notify(exception, metadata={"request": request.url}) return super().default(request, exception) @staticmethod def _should_notify(exception) -> bool: if not settings.BUGSNAG_API_KEY: return False if isinstance(exception, IGNORED_EXCEPTIONS): return False return True def init(app): app.config.API_HOST = app.config.SERVER_NAME = settings.SERVER_NAME app.config.API_SCHEMES = [settings.SCHEME] app.config.API_VERSION = utils.meta.version() app.config.API_TITLE = "Memegen.link" app.config.API_CONTACT_EMAIL = "[email protected]" app.config.API_LICENSE_NAME = "View the license" app.config.API_LICENSE_URL = ( "https://github.com/jacebrowning/memegen/blob/main/LICENSE.txt" ) app.config.OAS_UI_DEFAULT = "swagger" app.config.OAS_UI_REDOC = False app.config.SWAGGER_UI_CONFIGURATION = { "operationsSorter": "alpha", "docExpansion": "list", } app.ext.openapi.add_security_scheme("ApiKeyAuth", type="apiKey", name="X-API-KEY") app.ext.openapi.secured("ApiKeyAuth") app.blueprint(views.clients.blueprint) app.blueprint(views.memes.blueprint) app.blueprint(views.templates.blueprint) app.blueprint(views.shortcuts.blueprint) # registered last to avoid collisions app.error_handler = BugsnagErrorHandler() bugsnag.configure( api_key=settings.BUGSNAG_API_KEY, project_root="/app", release_stage=settings.RELEASE_STAGE, ) app/__init__.py METASEP app/views/templates.py METASEP import asyncio from contextlib import suppress from dataclasses import dataclass from sanic import Blueprint, exceptions, response from sanic_ext import openapi from .. 
import helpers, settings, utils from ..models import Template blueprint = Blueprint("Templates", url_prefix="/templates") @dataclass class ExampleResponse: text: list[str] url: str @dataclass class TemplateResponse: id: str name: str lines: int overlays: int styles: list[str] blank: str example: ExampleResponse source: str _self: str @blueprint.get("/") @openapi.summary("List all templates") @openapi.parameter( "animated", bool, "query", description="Limit results to templates supporting animation", ) @openapi.parameter( "filter", str, "query", description="Part of the name or example to match" ) @openapi.response( 200, {"application/json": list[TemplateResponse]}, "Successfully returned a list of all templates", ) async def index(request): query = request.args.get("filter", "").lower() animated = utils.urls.flag(request, "animated") data = await asyncio.to_thread( helpers.get_valid_templates, request, query, animated ) return response.json(data) @blueprint.get("/<id:slug>") @openapi.summary("View a specific template") @openapi.parameter("id", str, "path") @openapi.response( 200, {"application/json": TemplateResponse}, "Successfully returned a specific templates", ) @openapi.response(404, str, description="Template not found") async def detail(request, id): template: Template = Template.objects.get_or_none(id) if template: return response.json(template.jsonify(request)) raise exceptions.NotFound(f"Template not found: {id}") @dataclass class MemeRequest: text_lines: list[str] extension: str redirect: bool @dataclass class MemeResponse: url: str @blueprint.post("/<id:slug>") @openapi.tag("Memes") @openapi.operation("Memes.create_from_template") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Create a meme from a template" + settings.SUFFIX) @openapi.parameter("id", str, "path") @openapi.body({"application/json": MemeRequest}) @openapi.response( 201, {"application/json": MemeResponse}, "Successfully created a meme from a template", ) async def build(request, id): return await generate_url(request, id) @dataclass class CustomRequest: background: str style: str text_lines: list[str] font: str extension: str redirect: bool @blueprint.post("/custom") @openapi.tag("Memes") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Create a meme from any image" + settings.SUFFIX) @openapi.body({"application/json": CustomRequest}) @openapi.response( 201, {"application/json": MemeResponse}, "Successfully created a meme from a custom image", ) async def custom(request): return await generate_url(request) async def generate_url( request, template_id: str = "", *, template_id_required: bool = False ): if request.form: payload = dict(request.form) for key in list(payload.keys()): if "lines" not in key and "style" not in key: payload[key] = payload.pop(key)[0] else: try: payload = request.json or {} except exceptions.InvalidUsage: payload = {} with suppress(KeyError): payload["style"] = payload.pop("style[]") with suppress(KeyError): payload["text_lines"] = payload.pop("text_lines[]") if template_id_required: try: template_id = payload["template_id"] except KeyError: return response.json({"error": '"template_id" is required'}, status=400) else: template_id = utils.text.slugify(template_id) style: str = utils.urls.arg(payload, "", "style", "overlay", "alt") if isinstance(style, list): style = ",".join([(s.strip() or "default") for s in style]) while style.endswith(",default"): style = style.removesuffix(",default") text_lines = utils.urls.arg(payload, [], "text_lines") font = utils.urls.arg(payload, "", 
"font") background = utils.urls.arg(payload, "", "background", "image_url") extension = utils.urls.arg(payload, "", "extension") if style == "animated": extension = "gif" style = "" status = 201 if template_id: template: Template = Template.objects.get_or_create(template_id) url = template.build_custom_url( request, text_lines, style=style, font=font, extension=extension, ) if not template.valid: status = 404 template.delete() else: template = Template("_custom") url = template.build_custom_url( request, text_lines, background=background, style=style, font=font, extension=extension, ) url, _updated = await utils.meta.tokenize(request, url) if payload.get("redirect", False): return response.redirect(utils.urls.add(url, status="201")) return response.json({"url": url}, status=status) app/views/shortcuts.py METASEP from sanic import Blueprint, exceptions, response from sanic.log import logger from sanic_ext import openapi from .. import models, settings, utils blueprint = Blueprint("Shortcuts", url_prefix="/") @blueprint.get(r"/images/<template_id:[^.]+>") @openapi.summary("Redirect to an example image") @openapi.parameter("template_id", str, "path") @openapi.response( 302, {"image/*": bytes}, "Successfully redirected to an example image" ) @openapi.response(404, {"text/html": str}, "Template not found") @openapi.response(501, {"text/html": str}, "Template not fully implemented") async def example_path(request, template_id): template_id = utils.urls.clean(template_id) if settings.DEBUG: template = models.Template.objects.get_or_create(template_id) else: template = models.Template.objects.get_or_none(template_id) if template and template.valid: url = template.build_example_url(request, external=False) if settings.DEBUG: url = url.removesuffix(".png") return response.redirect(url) if settings.DEBUG: if "<" in template_id: message = f"Replace {template_id!r} in the URL" else: message = f"Template not fully implemented: {template}" logger.warning(message) template.datafile.save() raise exceptions.SanicException(message, 501) raise exceptions.NotFound(f"Template not found: {template_id}") @blueprint.get(r"/<template_id:.+\.\w+>") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Redirect to an example image" + settings.SUFFIX) @openapi.parameter("template_id", str, "path") @openapi.response( 302, {"image/*": bytes}, "Successfully redirected to an example image" ) @openapi.response(404, {"text/html": str}, "Template not found") async def legacy_example_image(request, template_id): template_id, extension = template_id.rsplit(".", 1) template = models.Template.objects.get_or_none(template_id) if template: url = template.build_example_url(request, extension=extension, external=False) return response.redirect(url) raise exceptions.NotFound(f"Template not found: {template_id}") @blueprint.get(r"/<template_id:slug>") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Redirect to an example image" + settings.SUFFIX) @openapi.parameter("template_id", str, "path") @openapi.response( 302, {"image/*": bytes}, "Successfully redirected to an example image" ) async def legacy_example_path(request, template_id): template_id = template_id.strip("/") return response.redirect(f"/images/{template_id}") @blueprint.get(r"/images/<template_id:slug>/<text_paths:[^/].*>") @openapi.summary("Redirect to a custom image") @openapi.parameter("text_paths", str, "path") @openapi.parameter("template_id", str, "path") @openapi.response(302, {"image/*": bytes}, "Successfully redirected to a custom image") async def 
custom_path(request, template_id, text_paths): if template_id == "images": return response.redirect(f"/images/{text_paths}".removesuffix("/")) if not settings.DEBUG: url = request.app.url_for( "Memes.text", template_id=template_id, text_paths=utils.urls.clean(text_paths) + "." + settings.DEFAULT_EXTENSION, ) return response.redirect(url) template = models.Template.objects.get_or_create(template_id) template.datafile.save() animated = utils.urls.flag(request, "animated") extension = "gif" if animated else "png" content = utils.html.gallery( [f"/images/{template_id}/{text_paths}.{extension}"], columns=False, refresh=30 if animated else 3, query_string=request.query_string, ) return response.html(content) @blueprint.get(r"/<template_id:(?!templates)[a-z-]+>/<text_paths:[^/].*\.\w+>") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Redirect to a custom image" + settings.SUFFIX) @openapi.parameter("text_paths", str, "path") @openapi.parameter("template_id", str, "path") @openapi.response(302, {"image/*": bytes}, "Successfully redirected to a custom image") @openapi.response(404, {"text/html": str}, description="Template not found") async def legacy_custom_image(request, template_id, text_paths): text_paths, extension = text_paths.rsplit(".", 1) template = models.Template.objects.get_or_none(template_id) if template: url = request.app.url_for( "Memes.text", template_id=template_id, text_paths=text_paths + "." + extension, ) return response.redirect(url) raise exceptions.NotFound(f"Template not found: {template_id}") @blueprint.get(r"/<template_id:(?!templates)[a-z-]+>/<text_paths:[^/].*>") @openapi.exclude(settings.DEPLOYED) @openapi.summary("Redirect to a custom image" + settings.SUFFIX) @openapi.parameter("text_paths", str, "path") @openapi.parameter("template_id", str, "path") @openapi.response(302, {"image/*": bytes}, "Successfully redirected to a custom image") async def legacy_custom_path(request, template_id, text_paths): if template_id == "images": return response.redirect(f"/images/{text_paths}".removesuffix("/")) return response.redirect(f"/images/{template_id}/{text_paths}") app/views/memes.py METASEP import asyncio from dataclasses import dataclass from sanic import Blueprint, exceptions, response from sanic.log import logger from sanic_ext import openapi from .. 
import helpers, models, settings, utils from .templates import generate_url blueprint = Blueprint("Memes", url_prefix="/images") @dataclass class ExampleResponse: url: str template: str @blueprint.get("/") @openapi.summary("List example memes") @openapi.operation("Memes.list") @openapi.parameter( "filter", str, "query", description="Part of the template name or example to match" ) @openapi.response( 200, {"application/json": list[ExampleResponse]}, "Successfully returned a list of example memes", ) async def index(request): query = request.args.get("filter", "").lower() examples = await asyncio.to_thread(helpers.get_example_images, request, query) return response.json( [{"url": url, "template": template} for url, template in examples] ) @dataclass class MemeRequest: template_id: str style: list[str] text_lines: list[str] font: str extension: str redirect: bool @dataclass class MemeResponse: url: str @dataclass class ErrorResponse: error: str @blueprint.post("/") @openapi.summary("Create a meme from a template") @openapi.operation("Memes.create") @openapi.body({"application/json": MemeRequest}) @openapi.response( 201, {"application/json": MemeResponse}, "Successfully created a meme" ) @openapi.response( 400, {"application/json": ErrorResponse}, 'Required "template_id" missing in request body', ) @openapi.response( 404, {"application/json": ErrorResponse}, 'Specified "template_id" does not exist' ) async def create(request): return await generate_url(request, template_id_required=True) @dataclass class AutomaticRequest: text: str safe: bool redirect: bool @blueprint.post("/automatic") @openapi.exclude(not settings.REMOTE_TRACKING_URL) @openapi.summary("Create a meme from word or phrase") @openapi.body({"application/json": AutomaticRequest}) @openapi.response( 201, {"application/json": MemeResponse}, "Successfully created a meme" ) @openapi.response( 400, {"application/json": ErrorResponse}, 'Required "text" missing in request body' ) async def automatic(request): if request.form: payload = dict(request.form) else: try: payload = request.json or {} except exceptions.InvalidUsage: payload = {} try: query = payload["text"] except KeyError: return response.json({"error": '"text" is required'}, status=400) results = await utils.meta.search(request, query, payload.get("safe", True)) logger.info(f"Found {len(results)} result(s)") if not results: return response.json({"message": f"No results matched: {query}"}, status=404) url = utils.urls.normalize(results[0]["image_url"]) confidence = results[0]["confidence"] logger.info(f"Top result: {url} ({confidence})") url, _updated = await utils.meta.tokenize(request, url) if payload.get("redirect", False): return response.redirect(utils.urls.add(url, status="201")) return response.json({"url": url, "confidence": confidence}, status=201) @dataclass class CustomRequest: background: str style: str text_lines: list[str] font: str extension: str redirect: bool @blueprint.post("/custom") @openapi.summary("Create a meme from any image") @openapi.body({"application/json": CustomRequest}) @openapi.response( 201, {"application/json": MemeResponse}, description="Successfully created a meme from a custom image", ) async def custom(request): return await generate_url(request) @blueprint.get("/custom") @openapi.summary("List popular custom memes") @openapi.operation("Memes.list_custom") @openapi.parameter("safe", bool, "query", description="Exclude NSFW results") @openapi.parameter( "filter", str, "query", description="Part of the meme's text to match" ) 
@openapi.response( 200, {"application/json": list[MemeResponse]}, "Successfully returned a list of custom memes", ) async def list_custom(request): query = request.args.get("filter", "").lower() safe = utils.urls.flag(request, "safe", True) results = await utils.meta.search(request, query, safe, mode="results") logger.info(f"Found {len(results)} result(s)") if not results: return response.json({"message": f"No results matched: {query}"}, status=404) items = [] for result in results: url = utils.urls.normalize(result["image_url"]) url, _updated = await utils.meta.tokenize(request, url) items.append({"url": url}) return response.json(items, status=200) @blueprint.get(r"/<template_id:.+\.\w+>") @openapi.tag("Templates") @openapi.summary("Display a template background") @openapi.parameter("template_id", str, "path") @openapi.response( 200, {"image/*": bytes}, "Successfully displayed a template background" ) @openapi.response(404, {"image/*": bytes}, "Template not found") @openapi.response(415, {"image/*": bytes}, "Unable to download image URL") @openapi.response( 422, {"image/*": bytes}, "Invalid style for template or no image URL specified for custom template", ) async def blank(request, template_id): template_id, extension = template_id.rsplit(".", 1) if request.args.get("style") == "animated" and extension != "gif": # TODO: Move this pattern to utils params = {k: v for k, v in request.args.items() if k != "style"} url = request.app.url_for( "Memes.blank", template_id=template_id + ".gif", **params, ) return response.redirect(utils.urls.clean(url), status=301) return await render_image(request, template_id, extension=extension) @blueprint.get(r"/<template_id:slug>/<text_paths:[^/].*\.\w+>") @openapi.summary("Display a custom meme") @openapi.parameter("text_paths", str, "path") @openapi.parameter("template_id", str, "path") @openapi.response(200, {"image/*": bytes}, "Successfully displayed a custom meme") @openapi.response(404, {"image/*": bytes}, "Template not found") @openapi.response(414, {"image/*": bytes}, "Custom text too long (length >200)") @openapi.response(415, {"image/*": bytes}, "Unable to download image URL") @openapi.response( 422, {"image/*": bytes}, "Invalid style for template or no image URL specified for custom template", ) async def text(request, template_id, text_paths): text_paths, extension = text_paths.rsplit(".", 1) if request.args.get("style") == "animated" and extension != "gif": # TODO: Move this pattern to utils params = {k: v for k, v in request.args.items() if k != "style"} url = request.app.url_for( "Memes.text", template_id=template_id, text_paths=text_paths + ".gif", **params, ) return response.redirect(utils.urls.clean(url), status=301) slug, updated = utils.text.normalize(text_paths) if updated: url = request.app.url_for( "Memes.text", template_id=template_id, text_paths=slug + "." + extension, **request.args, ) return response.redirect(utils.urls.clean(url), status=301) url, updated = await utils.meta.tokenize(request, request.url) if updated: return response.redirect(url, status=302) watermark, updated = await utils.meta.get_watermark(request) if updated: # TODO: Move this pattern to utils params = {k: v for k, v in request.args.items() if k != "watermark"} url = request.app.url_for( "Memes.text", template_id=template_id, text_paths=slug + "." 
+ extension, **params, ) return response.redirect(utils.urls.clean(url), status=302) return await render_image(request, template_id, slug, watermark, extension) async def render_image( request, id: str, slug: str = "", watermark: str = "", extension: str = settings.DEFAULT_EXTENSION, ): lines = utils.text.decode(slug) asyncio.create_task(utils.meta.track(request, lines)) status = int(utils.urls.arg(request.args, "200", "status")) if any(len(part.encode()) > 200 for part in slug.split("/")): logger.error(f"Slug too long: {slug}") slug = slug[:50] + "..." lines = utils.text.decode(slug) template = models.Template.objects.get("_error") style = settings.DEFAULT_STYLE status = 414 elif id == "custom": url = utils.urls.arg(request.args, None, "background", "alt") if url: template = await models.Template.create(url) if not template.image.exists(): logger.error(f"Unable to download image URL: {url}") template = models.Template.objects.get("_error") if url != settings.PLACEHOLDER: status = 415 style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, "style") if not utils.urls.schema(style): style = style.lower() if not await template.check(style): if utils.urls.schema(style): status = 415 elif style != settings.PLACEHOLDER: status = 422 else: logger.error("No image URL specified for custom template") template = models.Template.objects.get("_error") style = settings.DEFAULT_STYLE status = 422 else: template = models.Template.objects.get_or_none(id) if not template or not template.image.exists(): logger.error(f"No such template: {id}") template = models.Template.objects.get("_error") if id != settings.PLACEHOLDER: status = 404 style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, "style", "alt") if not await template.check(style): if utils.urls.schema(style): status = 415 elif style != settings.PLACEHOLDER: status = 422 if extension not in settings.ALLOWED_EXTENSIONS: extension = settings.DEFAULT_EXTENSION status = 422 font_name = utils.urls.arg(request.args, "", "font") if font_name == settings.PLACEHOLDER: font_name = "" else: try: models.Font.objects.get(font_name) except ValueError: font_name = "" status = 422 try: size = int(request.args.get("width", 0)), int(request.args.get("height", 0)) if 0 < size[0] < 10 or 0 < size[1] < 10: raise ValueError(f"dimensions are too small: {size}") except ValueError as e: logger.error(f"Invalid size: {e}") size = 0, 0 status = 422 frames = int(request.args.get("frames", 0)) path = await asyncio.to_thread( utils.images.save, template, lines, watermark, font_name=font_name, extension=extension, style=style, size=size, maximum_frames=frames, ) return await response.file(path, status) app/views/clients.py METASEP import asyncio from dataclasses import dataclass from datetime import datetime from sanic import Blueprint, response from sanic.log import logger from sanic_ext import openapi from .. 
import models, utils blueprint = Blueprint("Clients", url_prefix="/") @dataclass class AuthResponse: email: str image_access: bool search_access: bool created: datetime modified: datetime @dataclass class ErrorResponse: error: str @blueprint.post("/auth") @openapi.summary("Validate your API key") @openapi.response(200, {"application/json": AuthResponse}, "Your API key is valid") @openapi.response(401, {"application/json": ErrorResponse}, "Your API key is invalid") async def validate(request): info = await utils.meta.authenticate(request) return response.json( info or {"error": "API key missing or invalid."}, status=200 if info else 401, ) @dataclass class FontResponse: filename: str id: str alias: str @blueprint.get("/fonts") @openapi.summary("List available fonts") @openapi.response( 200, {"application/json": list[FontResponse]}, "Successfully returned a list of fonts", ) async def fonts(request): return response.json([font.data for font in models.Font.objects.all()]) @blueprint.get("/images/preview.jpg") @openapi.summary("Display a preview of a custom meme") @openapi.parameter("lines[]", str, "query", description="Lines of text to render") @openapi.parameter("style", str, "query", description="Style name or custom overlay") @openapi.parameter( "template", str, "query", description="Template ID, URL, or custom background" ) @openapi.response(200, {"image/jpeg": bytes}, "Successfully displayed a custom meme") async def preview(request): id = request.args.get("template", "_error") lines = request.args.getlist("lines[]", []) style = request.args.get("style") or ",".join(request.args.getlist("styles[]", [])) while style.endswith(",default"): style = style.removesuffix(",default") return await preview_image(request, id, lines, style) async def preview_image(request, id: str, lines: list[str], style: str): error = "" id = utils.urls.clean(id) if utils.urls.schema(id): template = await models.Template.create(id) if not template.image.exists(): logger.error(f"Unable to download image URL: {id}") template = models.Template.objects.get("_error") error = "Invalid Background" else: template = models.Template.objects.get_or_none(id) if not template: logger.error(f"No such template: {id}") template = models.Template.objects.get("_error") error = "Unknown Template" if not any(line.strip() for line in lines): lines = template.example if not utils.urls.schema(style): style = style.strip().lower() if not await template.check(style): error = "Invalid Overlay" data, content_type = await asyncio.to_thread( utils.images.preview, template, lines, style=style, watermark=error.upper() ) return response.raw(data, content_type=content_type) app/views/__init__.py METASEP from . import clients, memes, shortcuts, templates app/views/helpers.py METASEP
[ { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n", "type": "infile" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = 
\"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines 
= template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = 
\"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await 
template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: 
str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:\n size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))\n if 0 < size[0] < 10 or 0 < size[1] < 10:\n raise ValueError(f\"dimensions are too small: {size}\")\n except ValueError as e:\n logger.error(f\"Invalid size: {e}\")\n size = 0, 0\n status = 422\n\n frames = int(request.args.get(\"frames\", 0))\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:\n size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))\n if 0 < size[0] < 10 or 0 < size[1] < 10:\n raise ValueError(f\"dimensions are too small: {size}\")\n except ValueError as e:\n logger.error(f\"Invalid size: {e}\")\n size = 0, 0\n status = 422\n\n frames = int(request.args.get(\"frames\", 0))\n\n path = await asyncio.to_thread(", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:\n size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))\n if 0 < size[0] < 10 or 0 < size[1] < 10:\n raise ValueError(f\"dimensions are too small: {size}\")\n except ValueError as e:\n logger.error(f\"Invalid size: {e}\")\n size = 0, 0\n status = 422\n\n frames = int(request.args.get(\"frames\", 0))\n\n path = await asyncio.to_thread(\n utils.images.save,", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:\n size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))\n if 0 < size[0] < 10 or 0 < size[1] < 10:\n raise ValueError(f\"dimensions are too small: {size}\")\n except ValueError as e:\n logger.error(f\"Invalid size: {e}\")\n size = 0, 0\n status = 422\n\n frames = int(request.args.get(\"frames\", 0))\n\n path = await asyncio.to_thread(\n utils.images.save,\n template,\n lines,\n watermark,\n font_name=font_name,\n extension=extension,\n style=style,\n size=size,\n maximum_frames=frames,\n )", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n", "type": "inproject" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n", "type": "common" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:\n size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))\n if 0 < size[0] < 10 or 0 < size[1] < 10:\n raise ValueError(f\"dimensions are too small: {size}\")\n except ValueError as e:\n logger.error(f\"Invalid size: {e}\")\n size = 0, 0\n status = 422\n", "type": "common" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 422\n\n else:\n template = models.Template.objects.get_or_none(id)\n if not template or not template.image.exists():\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n if id != settings.PLACEHOLDER:\n status = 404\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n if extension not in settings.ALLOWED_EXTENSIONS:\n extension = settings.DEFAULT_EXTENSION\n status = 422\n\n font_name = utils.urls.arg(request.args, \"\", \"font\")\n if font_name == settings.PLACEHOLDER:\n font_name = \"\"\n else:\n try:\n models.Font.objects.get(font_name)\n except ValueError:\n font_name = \"\"\n status = 422\n\n try:", "type": "common" }, { "content": "import asyncio\nfrom contextlib import suppress\n", "type": "non_informative" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)", "type": "non_informative" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):", "type": "random" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. 
import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = utils.text.decode(slug)\n template = 
models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:", "type": "random" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):", "type": "random" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom 
sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]\n else:\n try:\n payload = request.json or {}\n except exceptions.InvalidUsage:\n payload = {}\n\n with suppress(KeyError):\n payload[\"style\"] = payload.pop(\"style[]\")\n with suppress(KeyError):\n payload[\"text_lines\"] = payload.pop(\"text_lines[]\")\n\n if template_id_required:\n try:\n template_id = payload[\"template_id\"]\n except KeyError:\n return response.json({\"error\": '\"template_id\" is required'}, status=400)\n else:\n template_id = utils.text.slugify(template_id)\n\n style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")\n if isinstance(style, list):\n style = \",\".join([(s.strip() or \"default\") for s in style])\n while style.endswith(\",default\"):\n style = style.removesuffix(\",default\")\n text_lines = utils.urls.arg(payload, [], \"text_lines\")\n font = utils.urls.arg(payload, \"\", \"font\")\n background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")\n extension = utils.urls.arg(payload, \"\", \"extension\")\n\n if style == \"animated\":\n extension = \"gif\"\n style = \"\"\n\n status = 201\n\n if template_id:\n template: models.Template = models.Template.objects.get_or_create(template_id)\n url = template.build_custom_url(\n request,\n text_lines,\n style=style,\n font=font,\n extension=extension,\n )\n if not template.valid:\n status = 404\n template.delete()\n else:\n template = models.Template(\"_custom\")\n url = template.build_custom_url(\n request,\n text_lines,\n background=background,\n style=style,\n font=font,\n extension=extension,\n )\n\n url, _updated = await utils.meta.tokenize(request, url)\n\n if payload.get(\"redirect\", False):\n return response.redirect(utils.urls.add(url, status=\"201\"))\n\n return response.json({\"url\": url}, status=status)\n\n\nasync def preview_image(request, id: str, lines: list[str], style: str):\n error = \"\"\n\n id = utils.urls.clean(id)\n if utils.urls.schema(id):\n template = await models.Template.create(id)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Invalid Background\"\n else:\n template = models.Template.objects.get_or_none(id)\n if not template:\n logger.error(f\"No such template: {id}\")\n template = models.Template.objects.get(\"_error\")\n error = \"Unknown Template\"\n\n if not any(line.strip() for line in lines):\n lines = template.example\n\n if not utils.urls.schema(style):\n style = style.strip().lower()\n if not await template.check(style):\n error = \"Invalid Overlay\"\n\n data, content_type = await asyncio.to_thread(\n utils.images.preview, template, lines, style=style, watermark=error.upper()\n )\n return response.raw(data, content_type=content_type)\n\n\nasync def render_image(\n request,\n id: str,\n slug: str = \"\",\n watermark: str = \"\",\n extension: str = settings.DEFAULT_EXTENSION,\n):\n lines = utils.text.decode(slug)\n asyncio.create_task(utils.meta.track(request, lines))\n\n status = int(utils.urls.arg(request.args, \"200\", \"status\"))\n\n if any(len(part.encode()) > 200 for part in slug.split(\"/\")):\n logger.error(f\"Slug too long: {slug}\")\n slug = slug[:50] + \"...\"\n lines = 
utils.text.decode(slug)\n template = models.Template.objects.get(\"_error\")\n style = settings.DEFAULT_STYLE\n status = 414\n\n elif id == \"custom\":\n url = utils.urls.arg(request.args, None, \"background\", \"alt\")\n if url:\n template = await models.Template.create(url)\n if not template.image.exists():\n logger.error(f\"Unable to download image URL: {url}\")\n template = models.Template.objects.get(\"_error\")\n if url != settings.PLACEHOLDER:\n status = 415\n\n style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")\n if not utils.urls.schema(style):\n style = style.lower()\n if not await template.check(style):\n if utils.urls.schema(style):\n status = 415\n elif style != settings.PLACEHOLDER:\n status = 422\n\n else:\n logger.error(\"No image URL specified for custom template\")\n template = models.Template.objects.get(\"_error\")", "type": "random" }, { "content": "import asyncio\nfrom contextlib import suppress\n\nfrom sanic import exceptions, response\nfrom sanic.log import logger\n\nfrom .. import models, settings, utils\n\n\nasync def generate_url(\n request, template_id: str = \"\", *, template_id_required: bool = False\n):\n if request.form:\n payload = dict(request.form)\n for key in list(payload.keys()):\n if \"lines\" not in key and \"style\" not in key:\n payload[key] = payload.pop(key)[0]", "type": "random" } ]
[ "async def render_image(", "async def preview_image(request, id: str, lines: list[str], style: str):", " lines = utils.text.decode(slug)", " template = models.Template.objects.get(\"_error\")", " url = utils.urls.arg(request.args, None, \"background\", \"alt\")", " if url:", " template = await models.Template.create(url)", " if not template.image.exists():", " logger.error(f\"Unable to download image URL: {url}\")", " if url != settings.PLACEHOLDER:", " style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\")", " if not utils.urls.schema(style):", " if not await template.check(style):", " if utils.urls.schema(style):", " template = models.Template.objects.get_or_none(id)", " if not template or not template.image.exists():", " template_id = utils.text.slugify(template_id)", " style: str = utils.urls.arg(payload, \"\", \"style\", \"overlay\", \"alt\")", " style = utils.urls.arg(request.args, settings.DEFAULT_STYLE, \"style\", \"alt\")", " text_lines = utils.urls.arg(payload, [], \"text_lines\")", " font = utils.urls.arg(payload, \"\", \"font\")", " background = utils.urls.arg(payload, \"\", \"background\", \"image_url\")", " extension = utils.urls.arg(payload, \"\", \"extension\")", " font_name = utils.urls.arg(request.args, \"\", \"font\")", " template: models.Template = models.Template.objects.get_or_create(template_id)", " url = template.build_custom_url(", " models.Font.objects.get(font_name)", " if not template.valid:", " template.delete()", " template = models.Template(\"_custom\")", " path = await asyncio.to_thread(", " utils.images.save,", " template,", " url, _updated = await utils.meta.tokenize(request, url)", " return response.redirect(utils.urls.add(url, status=\"201\"))", " return response.json({\"url\": url}, status=status)", " return await response.file(path, status)", " id = utils.urls.clean(id)", " if utils.urls.schema(id):", " template = await models.Template.create(id)", " if not template:", " lines = template.example", " data, content_type = await asyncio.to_thread(", " utils.images.preview, template, lines, style=style, watermark=error.upper()", " return response.raw(data, content_type=content_type)", " asyncio.create_task(utils.meta.track(request, lines))", " status = int(utils.urls.arg(request.args, \"200\", \"status\"))", " if any(len(part.encode()) > 200 for part in slug.split(\"/\")):", " if payload.get(\"redirect\", False):", " frames = int(request.args.get(\"frames\", 0))", " size = int(request.args.get(\"width\", 0)), int(request.args.get(\"height\", 0))", "from sanic import exceptions, response", "", " error = \"\"", " status = 415", " error = \"Invalid Overlay\"", " style = settings.DEFAULT_STYLE", " else:" ]
METASEP
72
rstudio__py-shiny
rstudio__py-shiny METASEP tests/test_utils_async.py METASEP """Tests for `shiny.utils` async-related functions.""" import pytest import asyncio from typing import Iterator, List from shiny.utils import run_coro_sync def range_sync(n: int) -> Iterator[int]: """ An implementation of `range()` which uses `yield`, but doesn't actually give up control to the event loop. """ num = 0 while num < n: yield num num += 1 async def make_list_sync(n: int) -> List[int]: """ An `async` function that is in fact synchronous; it does not actually give up control. """ x: list[int] = [] for i in range_sync(n): x.append(i) return x async def make_list_async(n: int) -> List[int]: """An `async` function that gives up control.""" x: list[int] = [] for i in range_sync(n): await asyncio.sleep(0) x.append(i) return x def test_run_coro_sync(): # Running a coroutine that is in fact synchronous works fine. res = run_coro_sync(make_list_sync(1)) assert res == [0] res = run_coro_sync(make_list_sync(3)) assert res == [0, 1, 2] # Should error because the asyncio.sleep() gives up control. with pytest.raises(RuntimeError): run_coro_sync(make_list_async(1)) with pytest.raises(RuntimeError): run_coro_sync(make_list_async(3)) # Same with a direct call to asyncio.sleep() with pytest.raises(RuntimeError): run_coro_sync(asyncio.sleep(0)) with pytest.raises(RuntimeError): run_coro_sync(asyncio.sleep(0.1)) def test_run_coro_async(): async def async_main(): # awaited calls to the in-fact-synchronous function are OK. res = await make_list_sync(3) assert res == [0, 1, 2] # awaited calls to the async function are OK. res = await make_list_async(3) assert res == [0, 1, 2] await asyncio.sleep(0) # Calling run_coro_sync() should be the same as when called normally # (from a regular function, not an async function run by asyncio.run()). res = run_coro_sync(make_list_sync(3)) assert res == [0, 1, 2] with pytest.raises(RuntimeError): run_coro_sync(make_list_async(3)) with pytest.raises(RuntimeError): run_coro_sync(asyncio.sleep(0)) asyncio.run(async_main()) def test_run_coro_sync_type_check(): # Should raise an error if passed a regular generator (as opposed to a # coroutine object). with pytest.raises(TypeError): run_coro_sync(range_sync(0)) # type: ignore def test_async_generator(): # run_coro_sync() can't run async generators, but it can run async functions # which call async generators. # An async generator async def async_gen_range(n: int): for i in range(n): yield i # An async function which uses the generator async def main(n: int): x: list[int] = [] async for i in async_gen_range(n): x.append(i) return x # Running the async function works fine. res = run_coro_sync(main(3)) assert res == [0, 1, 2] # Attempting to run the async generator results in an error, because it # doesn't return a coroutine object. with pytest.raises(TypeError): run_coro_sync(async_gen_range(3)) # type: ignore def test_create_task(): # Should be OK to call create_task(). async def create_task_wrapper(): async def inner(): asyncio.create_task(make_list_async(3)) run_coro_sync(inner()) asyncio.run(create_task_wrapper()) # Should not be OK to await a task, because it doesn't complete immediately. 
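    # Awaiting the task hands control back to the event loop, and run_coro_sync()
    # raises RuntimeError whenever the wrapped coroutine actually yields (see the
    # asyncio.sleep() cases above).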
async def create_task_wrapper2(): async def inner(): await asyncio.create_task(make_list_async(3)) run_coro_sync(inner()) with pytest.raises(RuntimeError): asyncio.run(create_task_wrapper2()) tests/test_shinysession.py METASEP """Tests for `shiny.shinysession`.""" import pytest from shiny import * def test_require_active_session_error_messages(): # _require_active_session() should report the caller's name when an error occurs. with pytest.raises(RuntimeError, match=r"Progress\(\) must be called"): Progress() with pytest.raises(RuntimeError, match=r"notification_remove\(\) must be called.*"): notification_remove("abc") tests/test_reactives.py METASEP """Tests for `shiny.reactives` and `shiny.reactcore`.""" import pytest import asyncio import shiny.reactcore as reactcore from shiny.reactives import * def test_flush_runs_newly_invalidated(): """ Make sure that a flush will also run any reactives that were invalidated during the flush. """ v1 = ReactiveVal(1) v2 = ReactiveVal(2) v2_result = None # In practice, on the first flush, Observers run in the order that they were # created. Our test checks that o2 runs _after_ o1. @observe() def o2(): nonlocal v2_result v2_result = v2() @observe() def o1(): v2(v1()) asyncio.run(reactcore.flush()) assert v2_result == 1 assert o2._exec_count == 2 assert o1._exec_count == 1 def test_flush_runs_newly_invalidated_async(): """ Make sure that a flush will also run any reactives that were invalidated during the flush. (Same as previous test, but async.) """ v1 = ReactiveVal(1) v2 = ReactiveVal(2) v2_result = None # In practice, on the first flush, Observers run in the order that they were # created. Our test checks that o2 runs _after_ o1. @observe_async() async def o2(): nonlocal v2_result v2_result = v2() @observe_async() async def o1(): v2(v1()) asyncio.run(reactcore.flush()) assert v2_result == 1 assert o2._exec_count == 2 assert o1._exec_count == 1 # ====================================================================== # Setting ReactiveVal to same value doesn't invalidate downstream # ====================================================================== def test_reactive_val_same_no_invalidate(): v = ReactiveVal(1) @observe() def o(): v() asyncio.run(reactcore.flush()) assert o._exec_count == 1 v(1) asyncio.run(reactcore.flush()) assert o._exec_count == 1 test_reactive_val_same_no_invalidate() # ====================================================================== # Recursive calls to reactives # ====================================================================== def test_recursive_reactive(): v = ReactiveVal(5) @reactive() def r(): if v() == 0: return 0 v(v() - 1) r() @observe() def o(): r() asyncio.run(reactcore.flush()) assert o._exec_count == 2 assert r._exec_count == 6 assert isolate(v) == 0 def test_recursive_reactive_async(): v = ReactiveVal(5) @reactive_async() async def r(): if v() == 0: return 0 v(v() - 1) await r() @observe_async() async def o(): await r() asyncio.run(reactcore.flush()) assert o._exec_count == 2 assert r._exec_count == 6 assert isolate(v) == 0 # ====================================================================== # Concurrent/sequential async # ====================================================================== def test_async_concurrent(): x: ReactiveVal[int] = ReactiveVal(1) results: list[int] = [] exec_order: list[str] = [] async def react_chain(n: int): @reactive_async() async def r(): nonlocal exec_order exec_order.append(f"r{n}-1") await asyncio.sleep(0) exec_order.append(f"r{n}-2") return x() + 10 
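        # The observer below awaits r(), which reads x(); invalidating x() therefore
        # re-runs both, and exec_order records how the two chains interleave.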
@observe_async() async def _(): nonlocal exec_order exec_order.append(f"o{n}-1") await asyncio.sleep(0) exec_order.append(f"o{n}-2") val = await r() exec_order.append(f"o{n}-3") results.append(val + n * 100) async def go(): await asyncio.gather(react_chain(1), react_chain(2)) await reactcore.flush() x(5) await reactcore.flush() asyncio.run(go()) assert results == [111, 211, 115, 215] # fmt: off # This is the order of execution if async observers are run with separate # (interleaved) tasks. When it hits an `asyncio.sleep(0)`, it will yield # control and then the other observer in the other task will run. assert exec_order == [ 'o1-1', 'o2-1', 'o1-2', 'o2-2', 'r1-1', 'r2-1', 'r1-2', 'r2-2', 'o1-3', 'o2-3', 'o1-1', 'o2-1', 'o1-2', 'o2-2', 'r1-1', 'r2-1', 'r1-2', 'r2-2', 'o1-3', 'o2-3' ] # fmt: on def test_async_sequential(): # Same as previous, but with a sequential flush, as in # `flush(concurrent=False)`. x: ReactiveVal[int] = ReactiveVal(1) results: list[int] = [] exec_order: list[str] = [] async def react_chain(n: int): @reactive_async() async def r(): nonlocal exec_order exec_order.append(f"r{n}-1") await asyncio.sleep(0) exec_order.append(f"r{n}-2") return x() + 10 @observe_async() async def _(): nonlocal exec_order exec_order.append(f"o{n}-1") await asyncio.sleep(0) exec_order.append(f"o{n}-2") val = await r() exec_order.append(f"o{n}-3") results.append(val + n * 100) async def go(): await asyncio.gather(react_chain(1), react_chain(2)) await reactcore.flush(concurrent=False) x(5) await reactcore.flush(concurrent=False) asyncio.run(go()) assert results == [111, 211, 115, 215] # This is the order of execution if the async observers are run # sequentially. The `asyncio.sleep(0)` still yields control, but since there # are no other observers scheduled, it will simply resume at the same point. # fmt: off assert exec_order == [ 'o1-1', 'o1-2', 'r1-1', 'r1-2', 'o1-3', 'o2-1', 'o2-2', 'r2-1', 'r2-2', 'o2-3', 'o1-1', 'o1-2', 'r1-1', 'r1-2', 'o1-3', 'o2-1', 'o2-2', 'r2-1', 'r2-2', 'o2-3' ] # fmt: on # ====================================================================== # isolate() # ====================================================================== def test_isolate_basic_value(): # isolate() returns basic value assert isolate(lambda: 123) == 123 assert isolate(lambda: None) is None def test_isolate_basic_without_context(): # isolate() works with Reactive and ReactiveVal; allows executing without a # reactive context. 
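# Note that isolate() accepts any zero-argument callable, and ReactiveVal and
# Reactive objects are themselves callable, so isolate(v) below behaves the
# same as isolate(lambda: v()).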
v = ReactiveVal(1) @reactive() def r(): return v() + 10 def get_r(): return r() assert isolate(lambda: v()) == 1 assert isolate(v) == 1 assert isolate(lambda: r()) == 11 assert isolate(r) == 11 assert isolate(get_r) == 11 def test_isolate_prevents_dependency(): v = ReactiveVal(1) @reactive() def r(): return v() + 10 v_dep = ReactiveVal(1) # Use this only for invalidating the observer o_val = None @observe() def o(): nonlocal o_val v_dep() o_val = isolate(lambda: r()) asyncio.run(reactcore.flush()) assert o_val == 11 # Changing v() shouldn't invalidate o v(2) asyncio.run(reactcore.flush()) assert o_val == 11 assert o._exec_count == 1 # v_dep() should invalidate the observer v_dep(2) asyncio.run(reactcore.flush()) assert o_val == 12 assert o._exec_count == 2 # ====================================================================== # isolate_async() # ====================================================================== def test_isolate_async_basic_value(): async def f(): return 123 async def go(): assert await isolate_async(f) == 123 asyncio.run(go()) def test_isolate_async_basic_without_context(): # isolate_async() works with Reactive and ReactiveVal; allows executing # without a reactive context. v = ReactiveVal(1) @reactive_async() async def r(): return v() + 10 async def get_r(): return await r() async def go(): assert await isolate_async(r) == 11 assert await isolate_async(get_r) == 11 asyncio.run(go()) def test_isolate_async_prevents_dependency(): v = ReactiveVal(1) @reactive_async() async def r(): return v() + 10 v_dep = ReactiveVal(1) # Use this only for invalidating the observer o_val = None @observe_async() async def o(): nonlocal o_val v_dep() o_val = await isolate_async(r) asyncio.run(reactcore.flush()) assert o_val == 11 # Changing v() shouldn't invalidate o v(2) asyncio.run(reactcore.flush()) assert o_val == 11 assert o._exec_count == 1 # v_dep() should invalidate the observer v_dep(2) asyncio.run(reactcore.flush()) assert o_val == 12 assert o._exec_count == 2 # ====================================================================== # Priority for observers # ====================================================================== def test_observer_priority(): v = ReactiveVal(1) results: list[int] = [] @observe(priority=1) def o1(): nonlocal results v() results.append(1) @observe(priority=2) def o2(): nonlocal results v() results.append(2) @observe(priority=1) def o3(): nonlocal results v() results.append(3) asyncio.run(reactcore.flush()) assert results == [2, 1, 3] # Add another observer with priority 2. Only this one will run (until we # invalidate others by changing v). @observe(priority=2) def o4(): nonlocal results v() results.append(4) results.clear() asyncio.run(reactcore.flush()) assert results == [4] # Change v and run again, to make sure results are stable results.clear() v(2) asyncio.run(reactcore.flush()) assert results == [2, 4, 1, 3] results.clear() v(3) asyncio.run(reactcore.flush()) assert results == [2, 4, 1, 3] # Same as previous, but with async def test_observer_async_priority(): v = ReactiveVal(1) results: list[int] = [] @observe_async(priority=1) async def o1(): nonlocal results v() results.append(1) @observe_async(priority=2) async def o2(): nonlocal results v() results.append(2) @observe_async(priority=1) async def o3(): nonlocal results v() results.append(3) asyncio.run(reactcore.flush()) assert results == [2, 1, 3] # Add another observer with priority 2. Only this one will run (until we # invalidate others by changing v). 
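# (Higher `priority` values are flushed first; observers with equal priority
# are flushed in the order they were queued, which in these tests is their
# creation order, matching the FIFO behaviour of PriorityQueueFIFO.)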
@observe_async(priority=2) async def o4(): nonlocal results v() results.append(4) results.clear() asyncio.run(reactcore.flush()) assert results == [4] # Change v and run again, to make sure results are stable results.clear() v(2) asyncio.run(reactcore.flush()) assert results == [2, 4, 1, 3] results.clear() v(3) asyncio.run(reactcore.flush()) assert results == [2, 4, 1, 3] # ====================================================================== # Destroying observers # ====================================================================== def test_observer_destroy(): v = ReactiveVal(1) results: list[int] = [] @observe() def o1(): nonlocal results v() results.append(1) asyncio.run(reactcore.flush()) assert results == [1] v(2) o1.destroy() asyncio.run(reactcore.flush()) assert results == [1] # Same as above, but destroy before running first time v = ReactiveVal(1) results: list[int] = [] @observe() def o2(): nonlocal results v() results.append(1) o2.destroy() asyncio.run(reactcore.flush()) assert results == [] tests/test_datastructures.py METASEP """Tests for `shiny.datastructures`.""" from shiny.datastructures import PriorityQueueFIFO def test_priority_queue_fifo(): q: PriorityQueueFIFO[str] = PriorityQueueFIFO() # The random-seeming items are here to ensure that the value of the items # do not affect the order that they go into the queue. q.put(1, "9") q.put(1, "8") q.put(2, "6") q.put(2, "7") assert q.get() == "6" assert q.get() == "7" assert q.get() == "9" assert q.get() == "8" tests/__init__.py METASEP """Unit test package for shiny.""" shiny/utils.py METASEP from typing import ( TYPE_CHECKING, Callable, Awaitable, TypeVar, Optional, List, Dict, Any, ) import os import tempfile import importlib import inspect import secrets from htmltools import TagList, TagChildArg # ============================================================================== # Misc utility functions # ============================================================================== def rand_hex(bytes: int) -> str: """ Creates a random hexadecimal string of size `bytes`. The length in characters will be bytes*2. """ format_str = "{{:0{}x}}".format(bytes * 2) return format_str.format(secrets.randbits(bytes * 8)) # ============================================================================== # Async-related functions # ============================================================================== T = TypeVar("T") def wrap_async(fn: Callable[[], T]) -> Callable[[], Awaitable[T]]: """ Wrap a synchronous function that returns T, and return an async function that wraps the original function. """ async def fn_async() -> T: return fn() return fn_async def is_async_callable(obj: object) -> bool: """ Returns True if `obj` is an `async def` function, or if it's an object with a `__call__` method which is an `async def` function. """ if inspect.iscoroutinefunction(obj): return True if hasattr(obj, "__call__"): if inspect.iscoroutinefunction(obj.__call__): # type: ignore return True return False # See https://stackoverflow.com/a/59780868/412655 for an excellent explanation # of how this stuff works. # For a more in-depth explanation, see # https://snarky.ca/how-the-heck-does-async-await-work-in-python-3-5/. def run_coro_sync(coro: Awaitable[T]) -> T: """ Run a coroutine that is in fact synchronous. Given a coroutine (which is returned by calling an `async def` function), this function will run the coroutine for one iteration. If the coroutine completes, then return the value. 
If it does not complete, then it will throw a `RuntimeError`. What it means to be "in fact synchronous": the coroutine must not yield control to the event loop. A coroutine may have an `await` expression in it, and that may call another function that has an `await`, but the chain will only yield control if a `yield` statement bubbles through `await`s all the way up. For example, `await asyncio.sleep(0)` will have a `yield` which bubbles up to the next level. Note that a `yield` in a generator used the regular way (not with `await`) will not bubble up, since it is not awaited on. """ if not inspect.iscoroutine(coro): raise TypeError("run_coro_sync requires a Coroutine object.") try: coro.send(None) except StopIteration as e: return e.value raise RuntimeError( "async function yielded control; it did not finish in one iteration." ) # ============================================================================== # System-related functions # ============================================================================== # Return directory that a package lives in. def package_dir(package: str) -> str: with tempfile.TemporaryDirectory(): pkg_file = importlib.import_module(".", package=package).__file__ return os.path.dirname(pkg_file) shiny/types.py METASEP # Sentinel value - indicates a missing value in a function call. class MISSING_TYPE: pass MISSING = MISSING_TYPE() shiny/shinysession.py METASEP __all__ = ( "ShinySession", "Outputs", "get_current_session", "session_context", ) import sys import json import re import asyncio import warnings import typing import mimetypes from contextvars import ContextVar, Token from contextlib import contextmanager from typing import ( TYPE_CHECKING, Callable, Optional, Union, Awaitable, Dict, List, Any, ) from starlette.requests import Request from starlette.responses import Response, HTMLResponse, PlainTextResponse if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict if TYPE_CHECKING: from .shinyapp import ShinyApp from htmltools import TagChildArg, TagList, HTMLDependency from .reactives import ReactiveValues, Observer, ObserverAsync from .connmanager import Connection, ConnectionClosed from . import render from . import utils from .fileupload import FileInfo, FileUploadManager from .input_handlers import input_handlers # This cast is necessary because if the type checker thinks that if # "tag" isn't in `message`, then it's not a ClientMessage object. # This will be fixable when TypedDict items can be marked as # potentially missing, in Python 3.10, with PEP 655. class ClientMessage(TypedDict): method: str class ClientMessageInit(ClientMessage): data: Dict[str, object] class ClientMessageUpdate(ClientMessage): data: Dict[str, object] # For messages where "method" is something other than "init" or "update". 
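# An illustrative example of such a message (the exact payload comes from the
# client): {"method": "uploadEnd", "args": ["job-id", "file_input"], "tag": 3}.
# The "tag" is echoed back in the response so the client can match it to the
# original request.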
class ClientMessageOther(ClientMessage): args: List[object] tag: int class ShinySession: # ========================================================================== # Initialization # ========================================================================== def __init__( self, app: "ShinyApp", id: str, conn: Connection, debug: bool = False ) -> None: self.app: ShinyApp = app self.id: str = id self._conn: Connection = conn self._debug: bool = debug self.input: ReactiveValues = ReactiveValues() self.output: Outputs = Outputs(self) self._message_queue_in: asyncio.Queue[Optional[ClientMessage]] = asyncio.Queue() self._message_queue_out: List[Dict[str, object]] = [] self._message_handlers: Dict[ str, Callable[..., Awaitable[object]] ] = self._create_message_handlers() self._file_upload_manager: FileUploadManager = FileUploadManager() self._on_ended_callbacks: List[Callable[[], None]] = [] self._has_run_session_end_tasks: bool = False self._register_session_end_callbacks() with session_context(self): self.app.server(self) def _register_session_end_callbacks(self) -> None: # This is to be called from the initialization. It registers functions # that are called when a session ends. # Clear file upload directories, if present self._on_ended_callbacks.append(self._file_upload_manager.rm_upload_dir) def _run_session_end_tasks(self) -> None: if self._has_run_session_end_tasks: return self._has_run_session_end_tasks = True for cb in self._on_ended_callbacks: try: cb() except Exception as e: print("Error in session on_ended callback: " + str(e)) self.app.remove_session(self) async def close(self, code: int = 1001) -> None: await self._conn.close(code, None) self._run_session_end_tasks() async def run(self) -> None: await self.send_message( {"config": {"workerId": "", "sessionId": str(self.id), "user": None}} ) try: while True: message: str = await self._conn.receive() if self._debug: print("RECV: " + message) try: message_obj = json.loads(message) except json.JSONDecodeError: print("ERROR: Invalid JSON message") continue if "method" not in message_obj: self._send_error_response("Message does not contain 'method'.") return if message_obj["method"] == "init": message_obj = typing.cast(ClientMessageInit, message_obj) self._manage_inputs(message_obj["data"]) elif message_obj["method"] == "update": message_obj = typing.cast(ClientMessageUpdate, message_obj) self._manage_inputs(message_obj["data"]) else: if "tag" not in message_obj: warnings.warn( "Cannot dispatch message with missing 'tag'; method: " + message_obj["method"] ) return if "args" not in message_obj: warnings.warn( "Cannot dispatch message with missing 'args'; method: " + message_obj["method"] ) return message_obj = typing.cast(ClientMessageOther, message_obj) await self._dispatch(message_obj) self.request_flush() await self.app.flush_pending_sessions() except ConnectionClosed: self._run_session_end_tasks() def _manage_inputs(self, data: Dict[str, object]) -> None: for (key, val) in data.items(): keys = key.split(":") if len(keys) > 2: raise ValueError( "Input name+type is not allowed to contain more than one ':' -- " + key ) if len(keys) == 2: val = input_handlers.process_value(keys[1], val, keys[0], self) self.input[keys[0]] = val # ========================================================================== # Message handlers # ========================================================================== async def _dispatch(self, message: ClientMessageOther) -> None: try: func = self._message_handlers[message["method"]] except AttributeError: 
self._send_error_response("Unknown method: " + message["method"]) return try: # TODO: handle `blobs` value: object = await func(*message["args"]) except Exception as e: self._send_error_response("Error: " + str(e)) return await self._send_response(message, value) async def _send_response(self, message: ClientMessageOther, value: object) -> None: await self.send_message({"response": {"tag": message["tag"], "value": value}}) # This is called during __init__. def _create_message_handlers(self) -> Dict[str, Callable[..., Awaitable[object]]]: async def uploadInit(file_infos: List[FileInfo]) -> Dict[str, object]: with session_context(self): if self._debug: print("Upload init: " + str(file_infos)) # TODO: Don't alter message in place? for fi in file_infos: if fi["type"] == "": type = mimetypes.guess_type(fi["name"])[0] fi["type"] = type if type else "application/octet-stream" job_id = self._file_upload_manager.create_upload_operation(file_infos) worker_id = "" return { "jobId": job_id, "uploadUrl": f"session/{self.id}/upload/{job_id}?w={worker_id}", } async def uploadEnd(job_id: str, input_id: str) -> None: upload_op = self._file_upload_manager.get_upload_operation(job_id) if upload_op is None: warnings.warn( "Received uploadEnd message for non-existent upload operation." ) return None file_data = upload_op.finish() self.input[input_id] = file_data # Explicitly return None to signal that the message was handled. return None return { "uploadInit": uploadInit, "uploadEnd": uploadEnd, } # ========================================================================== # Handling /session/{session_id}/{subpath} requests # ========================================================================== async def handle_request(self, request: Request) -> Response: subpath: str = request.path_params["subpath"] # type: ignore matches = re.search("^([a-z]+)/(.*)$", subpath) if not matches: return HTMLResponse("<h1>Bad Request</h1>", 400) if matches[1] == "upload" and request.method == "POST": # check that upload operation exists job_id = matches[2] upload_op = self._file_upload_manager.get_upload_operation(job_id) if not upload_op: return HTMLResponse("<h1>Bad Request</h1>", 400) # The FileUploadOperation can have multiple files; each one will # have a separate POST request. Each call to `with upload_op` will # open up each file (in sequence) for writing. 
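# The request body is streamed chunk by chunk into the upload operation, so
# large uploads are not read into memory all at once.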
with upload_op: async for chunk in request.stream(): upload_op.write_chunk(chunk) return PlainTextResponse("OK", 200) return HTMLResponse("<h1>Not Found</h1>", 404) # ========================================================================== # Outbound message handling # ========================================================================== def add_message_out(self, message: Dict[str, object]) -> None: self._message_queue_out.append(message) def get_messages_out(self) -> List[Dict[str, object]]: return self._message_queue_out def clear_messages_out(self) -> None: self._message_queue_out.clear() async def send_message(self, message: Dict[str, object]) -> None: message_str: str = json.dumps(message) + "\n" if self._debug: print( "SEND: " + re.sub("(?m)base64,[a-zA-Z0-9+/=]+", "[base64 data]", message_str), end="", ) await self._conn.send(json.dumps(message)) def _send_error_response(self, message_str: str) -> None: print("_send_error_response: " + message_str) pass # ========================================================================== # Flush # ========================================================================== def request_flush(self) -> None: self.app.request_flush(self) async def flush(self) -> None: values: Dict[str, object] = {} for value in self.get_messages_out(): values.update(value) message: Dict[str, object] = { "errors": {}, "values": values, "inputMessages": [], } try: await self.send_message(message) finally: self.clear_messages_out() # ========================================================================== # On session ended # ========================================================================== def on_ended(self, cb: Callable[[], None]) -> None: self._on_ended_callbacks.append(cb) # ========================================================================== # Misc # ========================================================================== async def unhandled_error(self, e: Exception) -> None: print("Unhandled error: " + str(e)) await self.close() class Outputs: def __init__(self, session: ShinySession) -> None: self._output_obervers: Dict[str, Observer] = {} self._session: ShinySession = session def __call__( self, name: str ) -> Callable[[Union[Callable[[], object], render.RenderFunction]], None]: def set_fn(fn: Union[Callable[[], object], render.RenderFunction]) -> None: # fn is either a regular function or a RenderFunction object. 
If # it's the latter, we can give it a bit of metadata, which can be # used by the if isinstance(fn, render.RenderFunction): fn.set_metadata(self._session, name) if name in self._output_obervers: self._output_obervers[name].destroy() @ObserverAsync async def output_obs(): await self._session.send_message( {"recalculating": {"name": name, "status": "recalculating"}} ) message: Dict[str, object] = {} if utils.is_async_callable(fn): fn2 = typing.cast(Callable[[], Awaitable[object]], fn) val = await fn2() else: val = fn() message[name] = val self._session.add_message_out(message) await self._session.send_message( {"recalculating": {"name": name, "status": "recalculated"}} ) self._output_obervers[name] = output_obs return None return set_fn # ============================================================================== # Context manager for current session (AKA current reactive domain) # ============================================================================== _current_session: ContextVar[Optional[ShinySession]] = ContextVar( "current_session", default=None ) def get_current_session() -> Optional[ShinySession]: return _current_session.get() @contextmanager def session_context(session: Optional[ShinySession]): token: Token[Union[ShinySession, None]] = _current_session.set(session) try: yield finally: _current_session.reset(token) def _require_active_session(session: Optional[ShinySession]) -> ShinySession: if session is None: session = get_current_session() if session is None: import inspect call_stack = inspect.stack() if len(call_stack) > 1: caller = call_stack[1] else: # Uncommon case: this function is called from the top-level, so the caller # is just _require_active_session. caller = call_stack[0] calling_fn_name = caller.function if calling_fn_name == "__init__": # If the caller is __init__, then we're most likely in the initialization of # an object. This will get the class name. calling_fn_name = caller.frame.f_locals["self"].__class__.__name__ raise RuntimeError( f"{calling_fn_name}() must be called from within an active Shiny session." 
) return session # ============================================================================== # Miscellaneous functions # ============================================================================== class _RenderedDeps(TypedDict): deps: List[Dict[str, Any]] html: str def _process_deps( ui: TagChildArg, session: Optional[ShinySession] = None ) -> _RenderedDeps: session = _require_active_session(session) res = TagList(ui).render() deps: List[Dict[str, Any]] = [] for dep in res["dependencies"]: session.app.register_web_dependency(dep) dep_dict = dep.as_dict(lib_prefix=session.app.LIB_PREFIX) deps.append(dep_dict) return {"deps": deps, "html": res["html"]} shiny/shinymodule.py METASEP __all__ = ( "ReactiveValuesProxy", "OutputsProxy", "ShinySessionProxy", "ShinyModule", ) from typing import Optional, Union, Callable, Any from htmltools.core import TagChildArg from .shinysession import ShinySession, Outputs, _require_active_session from .reactives import ReactiveValues from .render import RenderFunction class ReactiveValuesProxy(ReactiveValues): def __init__(self, ns: str, values: ReactiveValues): self._ns: str = ns self._values: ReactiveValues = values def _ns_key(self, key: str) -> str: return self._ns + "-" + key def __setitem__(self, key: str, value: object) -> None: self._values[self._ns_key(key)] = value def __getitem__(self, key: str) -> object: return self._values[self._ns_key(key)] def __delitem__(self, key: str) -> None: del self._values[self._ns_key(key)] class OutputsProxy(Outputs): def __init__(self, ns: str, outputs: Outputs): self._ns: str = ns self._outputs: Outputs = outputs def _ns_key(self, key: str) -> str: return self._ns + "-" + key def __call__( self, name: str ) -> Callable[[Union[Callable[[], object], RenderFunction]], None]: return self._outputs(self._ns_key(name)) class ShinySessionProxy(ShinySession): def __init__(self, ns: str, parent_session: ShinySession) -> None: self._ns: str = ns self._parent: ShinySession = parent_session self.input: ReactiveValuesProxy = ReactiveValuesProxy(ns, parent_session.input) self.output: OutputsProxy = OutputsProxy(ns, parent_session.output) class ShinyModule: def __init__( self, ui: Callable[..., TagChildArg], server: Callable[[ShinySessionProxy], None], ) -> None: self._ui: Callable[..., TagChildArg] = ui self._server: Callable[[ShinySessionProxy], None] = server def ui(self, namespace: str, *args: Any) -> TagChildArg: ns = ShinyModule._make_ns_fn(namespace) return self._ui(ns, *args) def server(self, ns: str, *, session: Optional[ShinySession] = None) -> None: self.ns: str = ns session = _require_active_session(session) session_proxy = ShinySessionProxy(ns, session) self._server(session_proxy) @staticmethod def _make_ns_fn(namespace: str) -> Callable[[str], str]: def ns_fn(id: str) -> str: return namespace + "-" + id return ns_fn shiny/shinyapp.py METASEP __all__ = ("ShinyApp",) from typing import Any, List, Optional, Union, Dict, Callable, cast from htmltools import Tag, TagList, HTMLDocument, HTMLDependency, RenderedHTML import starlette.routing import starlette.websockets from starlette.types import Message, Receive, Scope, Send from starlette.requests import Request from starlette.responses import Response, HTMLResponse, JSONResponse from .http_staticfiles import StaticFiles from .shinysession import ShinySession, session_context from . 
import reactcore from .connmanager import ( Connection, StarletteConnection, ) from .html_dependencies import jquery_deps, shiny_deps class ShinyApp: LIB_PREFIX = "lib/" def __init__( self, ui: Union[Tag, TagList], server: Callable[[ShinySession], None], *, debug: bool = False, ) -> None: self.ui: RenderedHTML = _render_page(ui, lib_prefix=self.LIB_PREFIX) self.server: Callable[[ShinySession], None] = server self._debug: bool = debug self._sessions: Dict[str, ShinySession] = {} self._last_session_id: int = 0 # Counter for generating session IDs self._sessions_needing_flush: Dict[int, ShinySession] = {} self._registered_dependencies: Dict[str, HTMLDependency] = {} self._dependency_handler: Any = starlette.routing.Router() self.starlette_app = starlette.routing.Router( routes=[ starlette.routing.WebSocketRoute("/websocket/", self._on_connect_cb), starlette.routing.Route("/", self._on_root_request_cb, methods=["GET"]), starlette.routing.Route( "/session/{session_id}/{subpath:path}", self._on_session_request_cb, methods=["GET", "POST"], ), starlette.routing.Mount("/", app=self._dependency_handler), ] ) def create_session(self, conn: Connection) -> ShinySession: self._last_session_id += 1 id = str(self._last_session_id) session = ShinySession(self, id, conn, debug=self._debug) self._sessions[id] = session return session def remove_session(self, session: Union[ShinySession, str]) -> None: if isinstance(session, ShinySession): session = session.id if self._debug: print(f"remove_session: {session}") del self._sessions[session] def run(self, debug: Optional[bool] = None) -> None: import uvicorn # type: ignore if debug is not None: self._debug = debug uvicorn.run(cast(Any, self), host="0.0.0.0", port=8000) # ASGI entrypoint. Handles HTTP, WebSocket, and lifespan. async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await self.starlette_app(scope, receive, send) async def call_pyodide(self, scope: Scope, receive: Receive, send: Send) -> None: # TODO: Pretty sure there are objects that need to be destroy()'d here? scope = cast(Any, scope).to_py() # ASGI requires some values to be byte strings, not character strings. Those are # not that easy to create in JavaScript, so we let the JS side pass us strings # and we convert them to bytes here. if "headers" in scope: # JS doesn't have `bytes` so we pass as strings and convert here scope["headers"] = [ [value.encode("latin-1") for value in header] for header in scope["headers"] ] if "query_string" in scope and scope["query_string"]: scope["query_string"] = scope["query_string"].encode("latin-1") if "raw_path" in scope and scope["raw_path"]: scope["raw_path"] = scope["raw_path"].encode("latin-1") async def rcv() -> Message: event = await receive() return cast(Message, cast(Any, event).to_py()) async def snd(event: Message): await send(event) await self(scope, rcv, snd) async def stop(self) -> None: # Close all sessions (convert to list to avoid modifying the dict while # iterating over it, which throws an error). for session in list(self._sessions.values()): await session.close() # ========================================================================== # Connection callbacks # ========================================================================== async def _on_root_request_cb(self, request: Request) -> Response: """ Callback passed to the ConnectionManager which is invoked when a HTTP request for / occurs. 
""" self._ensure_web_dependencies(self.ui["dependencies"]) return HTMLResponse(content=self.ui["html"]) async def _on_connect_cb(self, ws: starlette.websockets.WebSocket) -> None: """ Callback which is invoked when a new WebSocket connection is established. """ await ws.accept() conn = StarletteConnection(ws) session = self.create_session(conn) await session.run() async def _on_session_request_cb(self, request: Request) -> Response: """ Callback passed to the ConnectionManager which is invoked when a HTTP request for /session/* occurs. """ session_id: str = request.path_params["session_id"] # type: ignore # subpath: str = request.path_params["subpath"] if session_id in self._sessions: session: ShinySession = self._sessions[session_id] with session_context(session): return await session.handle_request(request) return JSONResponse({"detail": "Not Found"}, status_code=404) # ========================================================================== # Flush # ========================================================================== def request_flush(self, session: ShinySession) -> None: # TODO: Until we have reactive domains, because we can't yet keep track # of which sessions need a flush. pass # self._sessions_needing_flush[session.id] = session async def flush_pending_sessions(self) -> None: await reactcore.flush() # TODO: Until we have reactive domains, flush all sessions (because we # can't yet keep track of which ones need a flush) for _, session in self._sessions.items(): await session.flush() # for id, session in self._sessions_needing_flush.items(): # await session.flush() # del self._sessions_needing_flush[id] # ========================================================================== # HTML Dependency stuff # ========================================================================== def _ensure_web_dependencies(self, deps: List[HTMLDependency]) -> None: for dep in deps: self.register_web_dependency(dep) def register_web_dependency(self, dep: HTMLDependency) -> None: if ( dep.name in self._registered_dependencies and dep.version >= self._registered_dependencies[dep.name].version ): return paths = dep.source_path_map(lib_prefix=self.LIB_PREFIX) self._dependency_handler.mount( "/" + paths["href"], StaticFiles(directory=paths["source"]), name=dep.name + "-" + str(dep.version), ) self._registered_dependencies[dep.name] = dep def _render_page(ui: Union[Tag, TagList], lib_prefix: str) -> RenderedHTML: doc = HTMLDocument(TagList(jquery_deps(), shiny_deps(), ui)) return doc.render(lib_prefix=lib_prefix) shiny/render.py METASEP import sys import os import io import base64 import mimetypes import inspect from typing import TYPE_CHECKING, Callable, Optional, Awaitable, Union import typing if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict from htmltools import TagChildArg if TYPE_CHECKING: from .shinysession import ShinySession from . 
import utils __all__ = ( "render_plot", "render_image", "render_ui", ) UserRenderFunction = Callable[[], object] UserRenderFunctionAsync = Callable[[], Awaitable[object]] class ImgData(TypedDict): src: str width: Union[str, float] height: Union[str, float] alt: Optional[str] ImgRenderFunc = Callable[[], ImgData] ImgRenderFuncAsync = Callable[[], Awaitable[ImgData]] class RenderFunction: def __init__(self, fn: UserRenderFunction) -> None: raise NotImplementedError def __call__(self) -> object: raise NotImplementedError def set_metadata(self, session: "ShinySession", name: str) -> None: """When RenderFunctions are assigned to Output object slots, this method is used to pass along session and name information. """ self._session: ShinySession = session self._name: str = name class RenderFunctionAsync(RenderFunction): async def __call__(self) -> object: raise NotImplementedError class RenderPlot(RenderFunction): _ppi: float = 96 def __init__(self, fn: UserRenderFunction, alt: Optional[str] = None) -> None: self._fn: UserRenderFunctionAsync = utils.wrap_async(fn) self._alt: Optional[str] = alt def __call__(self) -> object: return utils.run_coro_sync(self.run()) async def run(self) -> object: # Reactively read some information about the plot. pixelratio: float = typing.cast( float, self._session.input[".clientdata_pixelratio"] ) width: float = typing.cast( float, self._session.input[f".clientdata_output_{self._name}_width"] ) height: float = typing.cast( float, self._session.input[f".clientdata_output_{self._name}_height"] ) fig = await self._fn() if fig is None: return None # Try each type of renderer in turn. The reason we do it this way is to avoid # importing modules that aren't already loaded. That could slow things down, or # worse, cause an error if the module isn't installed. # # Each try_render function should return either an ImgResult, None (which # indicates that the rendering failed), or the string "TYPE_MISMATCH" (which # indicate that `fig` object was not the type of object that the renderer knows # how to handle). In the case of a "TYPE_MISMATCH", it will move on to the next # renderer. result: Union[ImgData, None, Literal["TYPE_MISMATCH"]] = None if "matplotlib" in sys.modules: result = try_render_plot_matplotlib( fig, width, height, pixelratio, self._ppi ) if result != "TYPE_MISMATCH": return result if "PIL" in sys.modules: result = try_render_plot_pil(fig, width, height, pixelratio, self._ppi) if result != "TYPE_MISMATCH": return result raise Exception("Unsupported figure type: " + str(type(fig))) class RenderPlotAsync(RenderPlot, RenderFunctionAsync): def __init__(self, fn: UserRenderFunctionAsync, alt: Optional[str] = None) -> None: if not inspect.iscoroutinefunction(fn): raise TypeError("PlotAsync requires an async function") # Init the Plot base class with a placeholder synchronous function so it # won't throw an error, then replace it with the async function. super().__init__(lambda: None, alt) self._fn: UserRenderFunctionAsync = fn async def __call__(self) -> object: return await self.run() def render_plot(alt: Optional[str] = None): def wrapper(fn: Union[UserRenderFunction, UserRenderFunctionAsync]) -> RenderPlot: if inspect.iscoroutinefunction(fn): fn = typing.cast(UserRenderFunctionAsync, fn) return RenderPlotAsync(fn, alt=alt) else: return RenderPlot(fn, alt=alt) return wrapper # Try to render a matplotlib object. If `fig` is not a matplotlib object, return # "TYPE_MISMATCH". If there's an error in rendering, return None. 
If successful in # rendering, return an ImgData object. def try_render_plot_matplotlib( fig: object, width: float, height: float, pixelratio: float, ppi: float, alt: Optional[str] = None, ) -> Union[ImgData, None, Literal["TYPE_MISMATCH"]]: import matplotlib.figure import matplotlib.pyplot if isinstance(fig, matplotlib.figure.Figure): try: fig.set_dpi(ppi * pixelratio) fig.set_size_inches(width / ppi, height / ppi) with io.BytesIO() as buf: fig.savefig(buf, format="png") buf.seek(0) data = base64.b64encode(buf.read()) data_str = data.decode("utf-8") res: ImgData = { "src": "data:image/png;base64," + data_str, "width": width, "height": height, "alt": alt, } return res except Exception as e: # TODO: just let errors propagate? print("Error rendering matplotlib object: " + str(e)) finally: matplotlib.pyplot.close(fig) return None else: return "TYPE_MISMATCH" def try_render_plot_pil( fig: object, width: float, height: float, pixelratio: float, ppi: float, alt: Optional[str] = None, ) -> Union[ImgData, None, Literal["TYPE_MISMATCH"]]: import PIL.Image if isinstance(fig, PIL.Image.Image): try: with io.BytesIO() as buf: fig.save(buf, format="PNG") buf.seek(0) data = base64.b64encode(buf.read()) data_str = data.decode("utf-8") res: ImgData = { "src": "data:image/png;base64," + data_str, "width": width, "height": height, "alt": alt, } return res except Exception as e: # TODO: just let errors propagate? print("Error rendering PIL object: " + str(e)) return None else: return "TYPE_MISMATCH" class RenderImage(RenderFunction): def __init__(self, fn: ImgRenderFunc, delete_file: bool = False) -> None: self._fn: ImgRenderFuncAsync = utils.wrap_async(fn) self._delete_file: bool = delete_file def __call__(self) -> object: return utils.run_coro_sync(self.run()) async def run(self) -> object: res: ImgData = await self._fn() src: str = res.get("src") try: with open(src, "rb") as f: data = base64.b64encode(f.read()) data_str = data.decode("utf-8") content_type = mimetypes.guess_type(src)[1] res["src"] = f"data:{content_type};base64,{data_str}" return res finally: if self._delete_file: os.remove(src) class RenderImageAsync(RenderImage, RenderFunctionAsync): def __init__(self, fn: ImgRenderFuncAsync, delete_file: bool = False) -> None: if not inspect.iscoroutinefunction(fn): raise TypeError("PlotAsync requires an async function") # Init the Plot base class with a placeholder synchronous function so it # won't throw an error, then replace it with the async function. 
super().__init__(lambda: None, delete_file) self._fn: ImgRenderFuncAsync = fn async def __call__(self) -> object: return await self.run() def render_image(delete_file: bool = False): def wrapper(fn: Union[ImgRenderFunc, ImgRenderFuncAsync]) -> RenderImage: if inspect.iscoroutinefunction(fn): fn = typing.cast(ImgRenderFuncAsync, fn) return RenderImageAsync(fn, delete_file=delete_file) else: fn = typing.cast(ImgRenderFunc, fn) return RenderImage(fn, delete_file=delete_file) return wrapper UiRenderFunc = Callable[[], TagChildArg] UiRenderFuncAsync = Callable[[], Awaitable[TagChildArg]] class RenderUI(RenderFunction): def __init__(self, fn: UiRenderFunc) -> None: self._fn: UiRenderFuncAsync = utils.wrap_async(fn) def __call__(self) -> object: return utils.run_coro_sync(self.run()) async def run(self) -> object: ui: TagChildArg = await self._fn() if ui is None: return None # TODO: better a better workaround for the circular dependency from .shinysession import _process_deps return _process_deps(ui, self._session) class RenderUIAsync(RenderUI, RenderFunctionAsync): def __init__(self, fn: UiRenderFuncAsync) -> None: if not inspect.iscoroutinefunction(fn): raise TypeError("PlotAsync requires an async function") super().__init__(lambda: None) self._fn: UiRenderFuncAsync = fn async def __call__(self) -> object: return await self.run() def render_ui(): def wrapper(fn: Union[UiRenderFunc, UiRenderFuncAsync]) -> RenderUI: if inspect.iscoroutinefunction(fn): fn = typing.cast(UiRenderFuncAsync, fn) return RenderUIAsync(fn) else: fn = typing.cast(UiRenderFunc, fn) return RenderUI(fn) return wrapper shiny/reactives.py METASEP """Reactive components""" __all__ = ( "ReactiveVal", "ReactiveValues", "Reactive", "ReactiveAsync", "reactive", "reactive_async", "Observer", "ObserverAsync", "observe", "observe_async", "isolate", "isolate_async", ) from typing import ( TYPE_CHECKING, Optional, Callable, Awaitable, TypeVar, Union, Generic, Any, overload, ) import typing import inspect from .reactcore import Context, Dependents from . import reactcore from . import utils from .types import MISSING, MISSING_TYPE if TYPE_CHECKING: from .shinysession import ShinySession T = TypeVar("T") # ============================================================================== # ReactiveVal and ReactiveValues # ============================================================================== class ReactiveVal(Generic[T]): def __init__(self, value: T) -> None: self._value: T = value self._dependents: Dependents = Dependents() @overload def __call__(self) -> T: ... @overload def __call__(self, value: T) -> bool: ... def __call__(self, value: Union[MISSING_TYPE, T] = MISSING) -> Union[T, bool]: if isinstance(value, MISSING_TYPE): return self.get() else: return self.set(value) def get(self) -> T: self._dependents.register() return self._value def set(self, value: T) -> bool: if self._value is value: return False self._value = value self._dependents.invalidate() return True class ReactiveValues: def __init__(self, **kwargs: object) -> None: self._map: dict[str, ReactiveVal[Any]] = {} for key, value in kwargs.items(): self._map[key] = ReactiveVal(value) def __setitem__(self, key: str, value: object) -> None: if key in self._map: self._map[key](value) else: self._map[key] = ReactiveVal(value) def __getitem__(self, key: str) -> Any: # Auto-populate key if accessed but not yet set. Needed to take reactive # dependencies on input values that haven't been received from client # yet. 
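# The placeholder holds None; when the real value arrives from the client,
# __setitem__ updates this same ReactiveVal, which invalidates any context
# that read the placeholder value.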
if key not in self._map: self._map[key] = ReactiveVal(None) return self._map[key]() def __delitem__(self, key: str) -> None: del self._map[key] # ============================================================================== # Reactive # ============================================================================== class Reactive(Generic[T]): def __init__( self, func: Callable[[], T], *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING, ) -> None: if inspect.iscoroutinefunction(func): raise TypeError("Reactive requires a non-async function") self._func: Callable[[], Awaitable[T]] = utils.wrap_async(func) self._is_async: bool = False self._dependents: Dependents = Dependents() self._invalidated: bool = True self._running: bool = False self._most_recent_ctx_id: int = -1 self._ctx: Optional[Context] = None self._exec_count: int = 0 self._session: Optional[ShinySession] # Use `isinstance(x, MISSING_TYPE)`` instead of `x is MISSING` because # the type checker doesn't know that MISSING is the only instance of # MISSING_TYPE; this saves us from casting later on. if isinstance(session, MISSING_TYPE): # If no session is provided, autodetect the current session (this # could be None if outside of a session). session = shinysession.get_current_session() self._session = session # Use lists to hold (optional) value and error, instead of Optional[T], # because it makes typing more straightforward. For example if # .get_value() simply returned self._value, self._value had type # Optional[T], then the return type for get_value() would have to be # Optional[T]. self._value: list[T] = [] self._error: list[Exception] = [] def __call__(self) -> T: # Run the Coroutine (synchronously), and then return the value. # If the Coroutine yields control, then an error will be raised. return utils.run_coro_sync(self.get_value()) async def get_value(self) -> T: self._dependents.register() if self._invalidated or self._running: await self.update_value() if self._error: raise self._error[0] return self._value[0] async def update_value(self) -> None: self._ctx = Context() self._most_recent_ctx_id = self._ctx.id self._ctx.on_invalidate(self._on_invalidate_cb) self._exec_count += 1 self._invalidated = False was_running = self._running self._running = True with shinysession.session_context(self._session): try: await self._ctx.run(self._run_func, create_task=self._is_async) finally: self._running = was_running def _on_invalidate_cb(self) -> None: self._invalidated = True self._value.clear() # Allow old value to be GC'd self._dependents.invalidate() self._ctx = None # Allow context to be GC'd async def _run_func(self) -> None: self._error.clear() try: self._value.append(await self._func()) except Exception as err: self._error.append(err) class ReactiveAsync(Reactive[T]): def __init__( self, func: Callable[[], Awaitable[T]], *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING, ) -> None: if not inspect.iscoroutinefunction(func): raise TypeError("ReactiveAsync requires an async function") # Init the Reactive base class with a placeholder synchronous function # so it won't throw an error, then replace it with the async function. # Need the `cast` to satisfy the type checker. 
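# Setting _is_async to True (below) makes update_value() run the wrapped
# function inside its Context with create_task=True, i.e. as a real asyncio
# Task that is allowed to yield control.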
super().__init__(lambda: typing.cast(T, None), session=session) self._func: Callable[[], Awaitable[T]] = func self._is_async = True async def __call__(self) -> T: return await self.get_value() def reactive( *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING ) -> Callable[[Callable[[], T]], Reactive[T]]: def create_reactive(fn: Callable[[], T]) -> Reactive[T]: return Reactive(fn, session=session) return create_reactive def reactive_async( *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING ) -> Callable[[Callable[[], Awaitable[T]]], ReactiveAsync[T]]: def create_reactive_async(fn: Callable[[], Awaitable[T]]) -> ReactiveAsync[T]: return ReactiveAsync(fn, session=session) return create_reactive_async # ============================================================================== # Observer # ============================================================================== class Observer: def __init__( self, func: Callable[[], None], *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING, priority: int = 0, ) -> None: if inspect.iscoroutinefunction(func): raise TypeError("Observer requires a non-async function") self._func: Callable[[], Awaitable[None]] = utils.wrap_async(func) self._is_async: bool = False self._priority: int = priority self._invalidate_callbacks: list[Callable[[], None]] = [] self._destroyed: bool = False self._ctx: Optional[Context] = None self._exec_count: int = 0 self._session: Optional[ShinySession] # Use `isinstance(x, MISSING_TYPE)`` instead of `x is MISSING` because # the type checker doesn't know that MISSING is the only instance of # MISSING_TYPE; this saves us from casting later on. if isinstance(session, MISSING_TYPE): # If no session is provided, autodetect the current session (this # could be None if outside of a session). session = shinysession.get_current_session() self._session = session if self._session is not None: self._session.on_ended(self._on_session_ended_cb) # Defer the first running of this until flushReact is called self._create_context().invalidate() def _create_context(self) -> Context: ctx = Context() # Store the context explicitly in Observer object # TODO: More explanation here self._ctx = ctx def on_invalidate_cb() -> None: # Context is invalidated, so we don't need to store a reference to it # anymore. self._ctx = None for cb in self._invalidate_callbacks: cb() # TODO: Wrap this stuff up in a continue callback, depending on if suspended? 
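# Invalidation only queues this context for the next flush; the observer
# itself is re-run later, in on_flush_cb, when reactcore.flush() drains the
# pending-flush queue.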
ctx.add_pending_flush(self._priority) async def on_flush_cb() -> None: if not self._destroyed: await self.run() ctx.on_invalidate(on_invalidate_cb) ctx.on_flush(on_flush_cb) return ctx async def run(self) -> None: ctx = self._create_context() self._exec_count += 1 with shinysession.session_context(self._session): try: await ctx.run(self._func, create_task=self._is_async) except Exception as e: if self._session: await self._session.unhandled_error(e) def on_invalidate(self, callback: Callable[[], None]) -> None: self._invalidate_callbacks.append(callback) def destroy(self) -> None: self._destroyed = True if self._ctx is not None: self._ctx.invalidate() def _on_session_ended_cb(self) -> None: self.destroy() class ObserverAsync(Observer): def __init__( self, func: Callable[[], Awaitable[None]], *, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING, priority: int = 0, ) -> None: if not inspect.iscoroutinefunction(func): raise TypeError("ObserverAsync requires an async function") # Init the Observer base class with a placeholder synchronous function # so it won't throw an error, then replace it with the async function. super().__init__(lambda: None, session=session, priority=priority) self._func: Callable[[], Awaitable[None]] = func self._is_async = True def observe( *, priority: int = 0, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING ) -> Callable[[Callable[[], None]], Observer]: def create_observer(fn: Callable[[], None]) -> Observer: return Observer(fn, priority=priority, session=session) return create_observer def observe_async( *, priority: int = 0, session: Union[MISSING_TYPE, "ShinySession", None] = MISSING ) -> Callable[[Callable[[], Awaitable[None]]], ObserverAsync]: def create_observer_async(fn: Callable[[], Awaitable[None]]) -> ObserverAsync: return ObserverAsync(fn, priority=priority, session=session) return create_observer_async # ============================================================================== # Miscellaneous functions # ============================================================================== def isolate(func: Callable[[], T]) -> T: # The `object` in func's type definition also encompasses Awaitable[object], # so add a runtime check to make sure that this hasn't been called with an # async function. if inspect.iscoroutinefunction(func): raise TypeError("isolate() requires a non-async function") func_async: Callable[[], Awaitable[T]] = utils.wrap_async(func) ctx: Context = reactcore.Context() try: return utils.run_coro_sync(ctx.run(func_async, create_task=False)) finally: ctx.invalidate() async def isolate_async(func: Callable[[], Awaitable[T]]) -> T: ctx: Context = reactcore.Context() try: return await ctx.run(func, create_task=True) finally: ctx.invalidate() # Import here at the bottom seems to fix a circular dependency problem. from . 
import shinysession shiny/reactcore.py METASEP """Low-level reactive components.""" from typing import Callable, Optional, Awaitable, TypeVar from contextvars import ContextVar from asyncio import Task import asyncio from .datastructures import PriorityQueueFIFO T = TypeVar("T") class Context: """A reactive context""" def __init__(self) -> None: self.id: int = _reactive_environment.next_id() self._invalidated: bool = False self._invalidate_callbacks: list[Callable[[], None]] = [] self._flush_callbacks: list[Callable[[], Awaitable[None]]] = [] async def run(self, func: Callable[[], Awaitable[T]], create_task: bool) -> T: """Run the provided function in this context""" env = _reactive_environment return await env.run_with(self, func, create_task) def invalidate(self) -> None: """Invalidate this context. It will immediately call the callbacks that have been registered with onInvalidate().""" if self._invalidated: return self._invalidated = True for cb in self._invalidate_callbacks: cb() self._invalidate_callbacks.clear() def on_invalidate(self, func: Callable[[], None]) -> None: """Register a function to be called when this context is invalidated""" if self._invalidated: func() else: self._invalidate_callbacks.append(func) def add_pending_flush(self, priority: int) -> None: """Tell the reactive environment that this context should be flushed the next time flushReact() called.""" _reactive_environment.add_pending_flush(self, priority) def on_flush(self, func: Callable[[], Awaitable[None]]) -> None: """Register a function to be called when this context is flushed.""" self._flush_callbacks.append(func) async def execute_flush_callbacks(self) -> None: """Execute all flush callbacks""" for cb in self._flush_callbacks: try: await cb() finally: pass self._flush_callbacks.clear() class Dependents: def __init__(self) -> None: self._dependents: dict[int, Context] = {} def register(self) -> None: ctx: Context = get_current_context() if ctx.id not in self._dependents: self._dependents[ctx.id] = ctx def on_invalidate_cb() -> None: if ctx.id in self._dependents: del self._dependents[ctx.id] ctx.on_invalidate(on_invalidate_cb) def invalidate(self) -> None: # TODO: Check sort order for id in sorted(self._dependents.keys()): ctx = self._dependents[id] ctx.invalidate() class ReactiveEnvironment: """The reactive environment""" def __init__(self) -> None: self._current_context: ContextVar[Optional[Context]] = ContextVar( "current_context", default=None ) self._next_id: int = 0 self._pending_flush_queue: PriorityQueueFIFO[Context] = PriorityQueueFIFO() def next_id(self) -> int: """Return the next available id""" id = self._next_id self._next_id += 1 return id def current_context(self) -> Context: """Return the current Context object""" ctx = self._current_context.get() if ctx is None: raise RuntimeError("No current reactive context") return ctx async def run_with( self, ctx: Context, context_func: Callable[[], Awaitable[T]], create_task: bool ) -> T: async def wrapper() -> T: old = self._current_context.set(ctx) try: return await context_func() finally: self._current_context.reset(old) if not create_task: return await wrapper() else: return await asyncio.create_task(wrapper()) async def flush(self, *, concurrent: bool = True) -> None: """Flush all pending operations""" # Currently, we default to concurrent flush. In the future, we'll # probably remove the option and just do it one way or the other. For a # concurrent flush, there are still some issues that need to be # resolved. 
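# Concurrent mode wraps each pending context's flush callbacks in its own
# asyncio Task and awaits them together with gather(), so awaits inside one
# observer can interleave with another. Sequential mode drains the queue one
# context at a time, so an await simply resumes where it left off (compare
# test_async_concurrent and test_async_sequential for the resulting order).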
if concurrent: await self._flush_concurrent() else: await self._flush_sequential() async def _flush_concurrent(self) -> None: # Flush observers concurrently, using Tasks. tasks: list[Task[None]] = [] # Double-nest the check for self._pending_flush because it is possible # that running a flush callback (in the gather()) will add another thing # to the pending flush list (like if an observer sets a reactive value, # which in turn invalidates other reactives/observers). while not self._pending_flush_queue.empty(): while not self._pending_flush_queue.empty(): # Take the first element ctx = self._pending_flush_queue.get() try: task: Task[None] = asyncio.create_task( ctx.execute_flush_callbacks() ) tasks.append(task) finally: pass await asyncio.gather(*tasks) async def _flush_sequential(self) -> None: # Sequential flush: instead of storing the tasks in a list and # calling gather() on them later, just run each observer in # sequence. while not self._pending_flush_queue.empty(): ctx = self._pending_flush_queue.get() try: await ctx.execute_flush_callbacks() finally: pass def add_pending_flush(self, ctx: Context, priority: int) -> None: self._pending_flush_queue.put(priority, ctx) _reactive_environment = ReactiveEnvironment() def get_current_context() -> Context: return _reactive_environment.current_context() async def flush(*, concurrent: bool = True) -> None: await _reactive_environment.flush(concurrent=concurrent) shiny/progress.py METASEP from typing import Optional, Dict, Any from warnings import warn from .utils import run_coro_sync, rand_hex from .shinysession import ShinySession, _require_active_session class Progress: _style = "notification" def __init__( self, min: int = 0, max: int = 1, session: Optional[ShinySession] = None ): self.min = min self.max = max self.value = None self._id = rand_hex(8) self._closed = False self._session = _require_active_session(session) msg = {"id": self._id, "style": self._style} self._send_progress("open", msg) def set( self, value: float, message: Optional[str] = None, detail: Optional[str] = None, ): if self._closed: warn("Attempting to set progress, but progress already closed.") return None self.value = value if value: # Normalize value to number between 0 and 1 value = min(1, max(0, (value - self.min) / (self.max - self.min))) msg = { "id": self._id, "message": message, "detail": detail, "value": value, "style": self._style, } self._send_progress("update", {k: v for k, v in msg.items() if v is not None}) def inc( self, amount: float = 0.1, message: Optional[str] = None, detail: Optional[str] = None, ): if self.value is None: self.value = self.min value = min(self.value + amount, self.max) self.set(value, message, detail) def close(self): if self._closed: warn("Attempting to close progress, but progress already closed.") return None self._send_progress("close", {"id": self._id, "style": self._style}) self._closed = True def _send_progress(self, type: str, message: Dict[str, Any]): return run_coro_sync( self._session.send_message({"progress": {"type": type, "message": message}}) ) shiny/page.py METASEP import sys from typing import Optional, Any, List from warnings import warn if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import tags, Tag, TagList, div, TagChildArg from .html_dependencies import bootstrap_deps from .navs import navs_bar def page_navbar( *args: TagChildArg, # Create a type for nav()? 
title: Optional[TagChildArg] = None, id: Optional[str] = None, selected: Optional[str] = None, position: Literal["static-top", "fixed-top", "fixed-bottom"] = "static-top", header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, bg: Optional[str] = None, inverse: Literal["auto", True, False] = "auto", collapsible: bool = True, fluid: bool = True, window_title: Optional[str] = None, lang: Optional[str] = None ) -> Tag: if title is not None and window_title is None: # Try to infer window_title from contents of title window_title = " ".join(find_characters(title)) if not window_title: warn( "Unable to infer a `window_title` default from `title`. Consider providing a character string to `window_title`." ) return tags.html( tags.head(tags.title(window_title)), tags.body( navs_bar( *args, title=title, id=id, selected=selected, position=position, header=header, footer=footer, bg=bg, inverse=inverse, collapsible=collapsible, fluid=fluid ) ), lang=lang, ) def page_fluid( *args: Any, title: Optional[str] = None, lang: Optional[str] = None, **kwargs: str ) -> Tag: return page_bootstrap( div(*args, class_="container-fluid", **kwargs), title=title, lang=lang ) def page_fixed( *args: Any, title: Optional[str] = None, lang: Optional[str] = None, **kwargs: str ) -> Tag: return page_bootstrap( div(*args, class_="container", **kwargs), title=title, lang=lang ) # TODO: implement theme (just Bootswatch for now?) def page_bootstrap( *args: Any, title: Optional[str] = None, lang: Optional[str] = None ) -> Tag: page = TagList(bootstrap_deps(), *args) head = tags.title(title) if title else None return tags.html(tags.head(head), tags.body(page), lang=lang) def find_characters(x: Any) -> List[str]: if isinstance(x, str): return [x] elif isinstance(x, list): return [y for y in x if isinstance(y, str)] else: return [] shiny/output.py METASEP from typing import Optional from htmltools import tags, Tag, div, css, TagAttrArg, TagFunction def output_plot( id: str, width: str = "100%", height: str = "400px", inline: bool = False ) -> Tag: res = output_image(id=id, width=width, height=height, inline=inline) res.add_class("shiny-plot-output") return res def output_image( id: str, width: str = "100%", height: str = "400px", inline: bool = False ) -> Tag: func = tags.span if inline else div style = None if inline else css(width=width, height=height) return func(id=id, class_="shiny-image-output", style=style) def output_text( id: str, inline: bool = False, container: Optional[TagFunction] = None ) -> Tag: if not container: container = tags.span if inline else tags.div return container(id=id, class_="shiny-text-output") # type: ignore def output_text_verbatim(id: str, placeholder: bool = False) -> Tag: cls = "shiny-text-output" + (" noplaceholder" if not placeholder else "") return tags.pre(id=id, class_=cls) def output_ui( id: str, inline: bool = False, container: Optional[TagFunction] = None, **kwargs: TagAttrArg ) -> Tag: if not container: container = tags.span if inline else tags.div return container(id=id, class_="shiny-html-output", **kwargs) # type: ignore shiny/notifications.py METASEP import sys from typing import Dict, Union, Optional, Any if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import TagList, TagChildArg from .utils import run_coro_sync, rand_hex from .shinysession import ShinySession, _require_active_session, _process_deps def notification_show( ui: TagChildArg, action: Optional[TagList] = None, duration: 
Optional[Union[int, float]] = 5, close_button: bool = True, id: Optional[str] = None, type: Literal["default", "message", "warning", "error"] = "default", session: Optional[ShinySession] = None, ): session = _require_active_session(session) ui_ = _process_deps(ui, session) action_ = _process_deps(action, session) payload: Dict[str, Any] = { "html": ui_["html"], "action": action_["html"], "deps": ui_["deps"] + action_["deps"], "closeButton": close_button, "id": id if id else rand_hex(8), "type": type, } if duration: payload.update({"duration": duration * 1000}) return run_coro_sync( session.send_message({"notification": {"type": "show", "message": payload}}) ) def notification_remove(id: str, session: Optional[ShinySession] = None): session = _require_active_session(session) run_coro_sync( session.send_message({"notification": {"type": "remove", "message": None}}) ) return id shiny/navs.py METASEP import sys from typing import Optional, Any, Tuple if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import jsx_tag_create, JSXTag, TagList, TagChildArg, JSXTagAttrArg from .html_dependencies import nav_deps def nav( title: Any, *args: TagChildArg, value: Optional[str] = None, icon: TagChildArg = None, ) -> JSXTag: if not value: value = title return nav_tag("Nav", *args, value=value, title=TagList(icon, title)) def nav_menu( title: TagChildArg, *args: TagChildArg, value: Optional[str] = None, icon: TagChildArg = None, align: Literal["left", "right"] = "left", ) -> JSXTag: if not value: value = str(title) return nav_tag( "NavMenu", *args, value=value, title=TagList(icon, title), align=align ) # def nav_content(value, *args, icon: TagChildArg = None) -> tag: # raise Exception("Not yet implemented") def nav_item(*args: TagChildArg) -> JSXTag: return nav_tag("NavItem", *args) def nav_spacer() -> JSXTag: return nav_tag("NavSpacer") def navs_tab( *args: TagChildArg, id: Optional[str] = None, selected: Optional[str] = None, header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, ) -> JSXTag: return nav_tag( "Navs", *args, type="tabs", id=id, selected=selected, header=header, footer=footer, ) def navs_tab_card( *args: TagChildArg, id: Optional[str] = None, selected: Optional[str] = None, header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, ) -> JSXTag: return nav_tag( "NavsCard", *args, type="tabs", id=id, selected=selected, header=header, footer=footer, ) def navs_pill( *args: TagChildArg, id: Optional[str] = None, selected: Optional[str] = None, header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, ) -> JSXTag: return nav_tag( "Navs", *args, type="pills", id=id, selected=selected, header=header, footer=footer, ) def navs_pill_card( *args: TagChildArg, id: Optional[str] = None, selected: Optional[str] = None, header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, placement: Literal["above", "below"] = "above", ) -> JSXTag: return nav_tag( "NavsCard", *args, type="pills", id=id, selected=selected, header=header, footer=footer, placement=placement, ) def navs_pill_list( *args: TagChildArg, id: Optional[str] = None, selected: Optional[str] = None, header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, well: bool = True, fluid: bool = True, widths: Tuple[int, int] = (4, 8), ) -> JSXTag: return nav_tag( "NavsList", *args, id=id, selected=selected, header=header, footer=footer, well=well, widthNav=widths[0], widthContent=widths[1], ) # def 
navs_hidden(*args, id: Optional[str] = None, selected: Optional[str] = None, header: Any=None, footer: Any=None) -> tag: # return nav_tag("NavsHidden", *args, id=id, selected=selected, header=header, footer=footer) def navs_bar( *args: TagChildArg, title: Optional[TagChildArg] = None, id: Optional[str] = None, selected: Optional[str] = None, position: Literal["static-top", "fixed-top", "fixed-bottom"] = "static-top", header: Optional[TagChildArg] = None, footer: Optional[TagChildArg] = None, bg: Optional[str] = None, inverse: Literal["auto", True, False] = "auto", collapsible: bool = True, fluid: bool = True, ) -> JSXTag: return nav_tag( "NavsBar", *args, title=title, id=id, selected=selected, position=position, header=header, footer=footer, bg=bg, inverse=inverse, collapsible=collapsible, fluid=fluid, ) def nav_tag(name: str, *args: TagChildArg, **kwargs: JSXTagAttrArg) -> JSXTag: tag = jsx_tag_create("bslib." + name) return tag(nav_deps(), *args, **kwargs) shiny/modal.py METASEP import sys from typing import Optional, Any if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import tags, Tag, div, HTML, TagChildArg, TagAttrArg from .utils import run_coro_sync from .shinysession import ShinySession, _require_active_session, _process_deps def modal_button(label: str, icon: TagChildArg = None) -> Tag: return tags.button( icon, label, type="button", class_="btn btn-default", data_dismiss="modal", data_bs_dismiss="modal", ) def modal( *args: TagChildArg, title: Optional[str] = None, footer: Any = modal_button("Dismiss"), size: Literal["m", "s", "l", "xl"] = "m", easy_close: bool = False, fade: bool = True, **kwargs: TagAttrArg ) -> Tag: title_div = None if title: title_div = div(tags.h4(title, class_="modal-title"), class_="modal-header") if footer: footer = div(footer, class_="modal-footer") dialog = div( div( title_div, div(*args, class_="modal-body", **kwargs), footer, class_="modal-content", ), class_="modal-dialog" + ({"s": " modal-sm", "l": " modal-lg", "xl": " modal-xl"}.get(size, "")), ) # jQuery plugin doesn't work in Bootstrap 5, but vanilla JS doesn't work in Bootstrap 4 :sob: js = "\n".join( [ "if (window.bootstrap && !window.bootstrap.Modal.VERSION.match(/^4\\. 
/)) {", " var modal=new bootstrap.Modal(document.getElementById('shiny-modal'))", " modal.show()", "} else {", " $('#shiny-modal').modal().focus()", "}", ] ) backdrop = None if easy_close else "static" keyboard = None if easy_close else "false" return div( dialog, tags.script(HTML(js)), id="shiny-modal", class_="modal fade" if fade else "modal", tabindex="-1", data_backdrop=backdrop, data_bs_backdrop=backdrop, data_keyboard=keyboard, data_bs_keyboard=keyboard, ) def modal_show(modal: Tag, session: Optional[ShinySession] = None): session = _require_active_session(session) msg = _process_deps(modal) return run_coro_sync( session.send_message({"modal": {"type": "show", "message": msg}}) ) def modal_remove(session: Optional[ShinySession] = None): session = _require_active_session(session) return run_coro_sync( session.send_message({"modal": {"type": "remove", "message": None}}) ) shiny/insert-ui.py METASEP import sys from typing import Optional if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import TagList from .shinysession import ShinySession, _require_active_session, _process_deps def ui_insert( selector: str, ui: TagList, where: Literal["beforeEnd", "beforeBegin", "afterBegin", "afterEnd"] = "beforeEnd", multiple: bool = False, immediate: bool = False, session: Optional[ShinySession] = None, ): session = _require_active_session(session) def callback(): msg = { "selector": selector, "multiple": multiple, "where": where, "content": _process_deps(ui, session), } session.send_message({"shiny-insert-ui": msg}) # TODO: Should session have an on_flush() method? If not, how to get context object from session? callback() if immediate else session.on_flush(callback, once=True) def ui_remove( selector: str, multiple: bool = False, immediate: bool = False, session: Optional[ShinySession] = None, ): session = _require_active_session(session) def callback(): session.send_message( {"shiny-remove-ui": {"selector": selector, "multiple": multiple}} ) callback() if immediate else session.on_flush(callback, once=True) shiny/input_utils.py METASEP from htmltools import tags, Tag, TagChildArg def shiny_input_label(id: str, label: TagChildArg = None) -> Tag: cls = "control-label" + ("" if label else " shiny-label-null") return tags.label(label, class_=cls, id=id + "-label", for_=id) shiny/input_update.py METASEP shiny/input_text.py METASEP from typing import Optional from htmltools import tags, Tag, div, css, TagChildArg from .input_utils import shiny_input_label def input_text( id: str, label: TagChildArg, value: str = "", width: Optional[str] = None, placeholder: Optional[str] = None, ) -> Tag: return div( shiny_input_label(id, label), tags.input( id=id, type="text", class_="form-control", value=value, placeholder=placeholder, ), class_="form-group shiny-input-container", style=css(width=width), ) def input_text_area( id: str, label: TagChildArg, value: str = "", width: Optional[str] = None, height: Optional[str] = None, cols: Optional[int] = None, rows: Optional[int] = None, placeholder: Optional[str] = None, resize: Optional[str] = None, ) -> Tag: if resize and resize not in ["none", "both", "horizontal", "vertical"]: raise ValueError("Invalid resize value: " + str(resize)) area = tags.textarea( id=id, class_="form-control", style=css(width=None if width else "100%", height=height, resize=resize), placeholder=placeholder, rows=rows, cols=cols, children=[value], ) return div( shiny_input_label(id, label), area, class_="form-group 
shiny-input-container", style=css(width=width), ) shiny/input_slider.py METASEP import math import sys from datetime import date, datetime, timedelta from typing import Dict, Optional, Union, Tuple, TypeVar if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict from typing_extensions import NotRequired from htmltools import tags, Tag, div, css, TagAttrArg, TagChildArg, HTML from .html_dependencies import ionrangeslider_deps from .input_utils import shiny_input_label __all__ = ["input_slider"] # TODO: validate value(s) are within (min,max)? SliderVal = TypeVar("SliderVal", int, float, datetime, date) class AnimationOptions(TypedDict): interval: NotRequired[int] loop: NotRequired[bool] play_button: NotRequired[TagChildArg] pause_button: NotRequired[TagChildArg] def input_slider( id: str, label: TagChildArg, min: SliderVal, max: SliderVal, value: Union[SliderVal, Tuple[SliderVal, SliderVal]], step: Optional[Union[int, float, timedelta]] = None, ticks: bool = True, animate: Union[bool, AnimationOptions] = False, width: Optional[str] = None, sep: str = ",", pre: Optional[str] = None, post: Optional[str] = None, time_format: Optional[str] = None, timezone: Optional[str] = None, drag_range: bool = True, ) -> Tag: # Thanks to generic typing, max, value, etc. should be of the same type data_type = _slider_type(min) # Make sure min, max, value, and step are all numeric # (converts dates/datetimes to milliseconds since epoch...this is the value JS wants) min_num = _as_numeric(min) max_num = _as_numeric(max) val_nums = ( (_as_numeric(value[0]), _as_numeric(value[1])) if isinstance(value, (tuple, list)) else (_as_numeric(value), _as_numeric(value)) ) step_num = _find_step_size(min_num, max_num) if step is None else _as_numeric(step) n_ticks = None if ticks: n_steps = (max_num - min_num) / step_num # Make sure there are <= 10 steps. # n_ticks can be a noninteger, which is good when the range is not an # integer multiple of the step size, e.g., min=1, max=10, step=4 scale_factor = math.ceil(n_steps / 10) n_ticks = n_steps / scale_factor props: Dict[str, TagAttrArg] = { "class_": "js-range-slider", "id": id, "style": css(width=width), "data_skin": "shiny", # TODO: do we need to worry about scientific notation (i.e., formatNoSci()?) 
"data_min": str(min_num), "data_max": str(max_num), "data_from": str(val_nums[0]), "data_step": str(step_num), "data_grid": ticks, "data_grid_num": n_ticks, "data_grid_snap": "false", "data_prettify_separator": sep, "data_prettify_enabled": sep != "", "data_prefix": pre, "data_postfix": post, "data_keyboard": "true", "data_data_type": data_type, "data_time_format": time_format, "data_timezone": timezone, } if isinstance(value, (tuple, list)): props["data_type"] = "double" props["data_to"] = str(val_nums[1]) props["data_drag_interval"] = drag_range if not time_format and data_type[0:4] == "date": props["data_time_format"] = "%F" if data_type == "date" else "%F %T" # ionRangeSlider wants attr = 'true'/'false' props = {k: str(v).lower() if isinstance(v, bool) else v for k, v in props.items()} slider_tag = div( shiny_input_label(id, label), tags.input(**props), *ionrangeslider_deps(), class_="form-group shiny-input-container", ) if animate is False: return slider_tag if animate is True: animate = AnimationOptions() animate_tag = div( tags.a( tags.span(animate.get("play_button", _play_icon()), class_="play"), tags.span(animate.get("pause_button", _pause_icon()), class_="pause"), href="#", class_="slider-animate-button", data_target_id=id, data_interval=animate.get("interval", 500), data_loop=animate.get("loop", True), ), class_="slider-animate-container", ) slider_tag.append(animate_tag) return slider_tag def _slider_type(x: SliderVal) -> str: if isinstance(x, datetime): return "datetime" if isinstance(x, date): return "date" return "number" def _as_numeric(x: Union[int, float, datetime, date, timedelta]) -> Union[int, float]: if isinstance(x, timedelta): return x.total_seconds() * 1000 if isinstance(x, datetime): return x.timestamp() * 1000 if isinstance(x, date): return datetime(x.year, x.month, x.day).timestamp() * 1000 return x def _find_step_size( min: Union[int, float], max: Union[int, float] ) -> Union[int, float]: # TODO: this is a naive version of shiny::findStepSize() that might be susceptible to # rounding errors? https://github.com/rstudio/shiny/pull/1956 range = max - min if range < 2 or isinstance(min, float) or isinstance(max, float): step = range / 100 # Round the step to get rid of any floating point arithmetic errors by # mimicing what signif(digits = 10, step) does in R (see Description of ?signif) # (the basic intuition is that smaller differences need more precision) return round(step, 10 - math.ceil(math.log10(step))) else: return 1 def _play_icon() -> HTML: try: from fontawesome import icon_svg return icon_svg("play") except ImportError: return HTML("&#x23ef;") def _pause_icon() -> HTML: try: from fontawesome import icon_svg return icon_svg("pause") except ImportError: return HTML("&#9616;&#9616;") shiny/input_select.py METASEP from typing import Optional, Dict, Union, List, cast from htmltools import Tag, tags, div, TagChildArg from .html_dependencies import selectize_deps from .input_utils import shiny_input_label # This is the canonical format for representing select options. 
SelectInputOptions = Dict[str, Union[str, Dict[str, str]]] def input_selectize( id: str, label: TagChildArg, choices: Union[List[str], Dict[str, Union[str, List[str], Dict[str, str]]]], *, selected: Optional[str] = None, multiple: bool = False, width: Optional[str] = None, size: Optional[str] = None, ) -> Tag: return input_select( id, label, choices, selected=selected, multiple=multiple, selectize=True, width=width, size=size, ) # # Make sure accessibility plugin is included by default # if not options.get("plugins", None): # options["plugins"] = [] # if "selectize-plugin-a11y" not in options["plugins"]: # options["plugins"].append("selectize-plugin-a11y") # deps = [selectize_deps()] # if "drag_drop" in options["plugins"]: # deps.append(jqui_deps()) # return jsx_tag_create("InputSelectize")(deps, id=id, options=options, **kwargs) def input_select( id: str, label: TagChildArg, choices: Union[List[str], Dict[str, Union[str, List[str], Dict[str, str]]]], *, selected: Optional[str] = None, multiple: bool = False, selectize: bool = False, width: Optional[str] = None, size: Optional[str] = None, ) -> Tag: choices_ = _normalize_choices(choices) if selected is None: selected = _find_first_option(choices_) choices_tags = _render_choices(choices_, selected) return div( shiny_input_label(id, label), div( tags.select( *choices_tags, id=id, class_=None if selectize else "form-select", multiple=multiple, width=width, size=size, ), ( [ tags.script("{}", type="application/json", data_for=id), selectize_deps(), ] if selectize else None ), ), class_="form-group shiny-input-container", ) # x can be structured like any of the following: # List: # ["a", "b", "c"] # Dictionary: # {"Choice A": "a", "Choice B": "b", "Choice C": "c"} # Dictionary with sub-lists or sub-dictionaries (which are optgroups): # { # "Choice A": "a", # "Group B": {"Choice B1": "b1", "Choice B2": "b2"}, # "Group C: ["c1, "c2"] # } def _normalize_choices( x: Union[List[str], Dict[str, Union[str, List[str], Dict[str, str]]]] ) -> SelectInputOptions: if isinstance(x, list): return {k: k for k in x} # If we got here, it's a dict. The value of each item. result = x.copy() for (k, value) in result.items(): # Convert list[str] to dict[str, str], but leave str, and dict[str, str] alone. if isinstance(value, list): result[k] = {k: k for k in value} # The type checker isn't smart enough to realize that none of the values are lists # at this point, so tell it to ignore the type. return result # type: ignore def _render_choices(x: SelectInputOptions, selected: Optional[str] = None) -> List[Tag]: result: List[Tag] = [] for (label, value) in x.items(): if isinstance(value, dict): # Type checker needs a little help here -- value is already a narrower type # than SelectInputOptions. value = cast(SelectInputOptions, value) result.append( tags.optgroup(*(_render_choices(value, selected)), label=label) ) else: result.append(tags.option(label, value=value, selected=(value == selected))) return result # Returns the first option in a SelectInputOptions object. For most cases, this is # straightforward. In the following, the first option is "a": # { "Choice A": "a", "Choice B": "b", "Choice C": "c" } # # Sometimes the first option is nested within an optgroup. 
For example, in the # following, the first option is "b1": # { # "Group A": {}, # "Group B": {"Choice B1": "b1", "Choice B2": "b2"}, # } def _find_first_option(x: SelectInputOptions) -> Optional[str]: for (_label, value) in x.items(): if isinstance(value, dict): value = cast(SelectInputOptions, value) result = _find_first_option(value) if result is not None: return result else: return value return None shiny/input_password.py METASEP from typing import Optional from htmltools import tags, Tag, div, css, TagChildArg from .input_utils import shiny_input_label def input_password( id: str, label: TagChildArg, value: str = "", width: Optional[str] = None, placeholder: Optional[str] = None, ) -> Tag: return div( shiny_input_label(id, label), tags.input( id=id, type="password", value=value, class_="form-control", placeholder=placeholder, ), class_="form-group shiny-input-container", style=css(width=width), ) shiny/input_numeric.py METASEP from typing import Optional, Union from htmltools import tags, Tag, div, css, TagChildArg from .input_utils import shiny_input_label valType = Union[int, float] def input_numeric( id: str, label: TagChildArg, value: valType, min: Optional[valType] = None, max: Optional[valType] = None, step: Optional[valType] = None, width: Optional[str] = None, ) -> Tag: return div( shiny_input_label(id, label), tags.input( id=id, type="number", class_="form-control", value=value, min=min, max=max, step=step, ), class_="form-group shiny-input-container", style=css(width=width), ) shiny/input_handlers.py METASEP from datetime import date, datetime from typing import TYPE_CHECKING, Callable, Dict, Union, List, Any, TypeVar if TYPE_CHECKING: from .shinysession import ShinySession InputHandlerType = Callable[[Any, str, "ShinySession"], Any] class _InputHandlers(Dict[str, InputHandlerType]): def __init__(self): super().__init__() def add(self, name: str, force: bool = False) -> Callable[[InputHandlerType], None]: def _(func: InputHandlerType): if name in self and not force: raise ValueError(f"Input handler {name} already registered") self[name] = func return None return _ def remove(self, name: str): del self[name] def process_value( self, type: str, value: Any, name: str, session: "ShinySession" ) -> Any: handler = self.get(type) if handler is None: raise ValueError("No input handler registered for type: " + type) return handler(value, name, session) input_handlers = _InputHandlers() _NumberType = TypeVar("_NumberType", int, float, None) # Doesn't do anything since it seems weird to coerce None into some sort of NA (like we do in R)? 
@input_handlers.add("shiny.number") def _(value: _NumberType, name: str, session: "ShinySession") -> _NumberType: return value # TODO: implement when we have bookmarking @input_handlers.add("shiny.password") def _(value: str, name: str, session: "ShinySession") -> str: return value @input_handlers.add("shiny.date") def _( value: Union[str, List[str]], name: str, session: "ShinySession" ) -> Union[date, List[date]]: if isinstance(value, str): return datetime.strptime(value, "%Y-%m-%d").date() return [datetime.strptime(v, "%Y-%m-%d").date() for v in value] @input_handlers.add("shiny.datetime") def _( value: Union[int, float, List[int], List[float]], name: str, session: "ShinySession" ) -> Union[datetime, List[datetime]]: if isinstance(value, (int, float)): return datetime.utcfromtimestamp(value) return [datetime.utcfromtimestamp(v) for v in value] class ActionButtonValue(int): pass @input_handlers.add("shiny.action") def _(value: int, name: str, session: "ShinySession") -> ActionButtonValue: return ActionButtonValue(value) # TODO: implement when we have bookmarking @input_handlers.add("shiny.file") def _(value: Any, name: str, session: "ShinySession") -> Any: return value shiny/input_file.py METASEP from typing import Optional, List from htmltools import tags, Tag, div, span, css, TagChildArg from .input_utils import shiny_input_label def input_file( id: str, label: TagChildArg, multiple: bool = False, accept: Optional[List[str]] = None, width: Optional[str] = None, button_label: str = "Browse...", placeholder: str = "No file selected", ) -> Tag: btn_file = span( button_label, tags.input( id=id, name=id, type="file", multiple="multiple" if multiple else None, accept=",".join(accept) if accept else None, # Don't use "display: none;" style, which causes keyboard accessibility issue; instead use the following workaround: https://css-tricks.com/places-its-tempting-to-use-display-none-but-dont/ style="position: absolute !important; top: -99999px !important; left: -99999px !important;", ), class_="btn btn-default btn-file", ) return div( shiny_input_label(id, label), div( tags.label(btn_file, class_="input-group-btn input-group-prepend"), tags.input( type="text", class_="form-control", placeholder=placeholder, readonly="readonly", ), class_="input-group", ), div( div(class_="progress-bar"), id=id + "_progress", class_="progress active shiny-file-input-progress", ), class_="form-group shiny-input-container", style=css(width=width), ) shiny/input_date.py METASEP import json from datetime import date from typing import Optional from htmltools import tags, Tag, div, span, TagAttrArg, TagChildArg, css from .html_dependencies import datepicker_deps from .input_utils import shiny_input_label __all__ = ["input_date", "input_date_range"] def input_date( id: str, label: TagChildArg, value: Optional[date] = None, min: Optional[date] = None, max: Optional[date] = None, format: str = "yyyy-mm-dd", startview: str = "month", weekstart: int = 0, language: str = "en", width: Optional[str] = None, autoclose: bool = True, datesdisabled: Optional[str] = None, daysofweekdisabled: Optional[str] = None, ) -> Tag: # TODO: needed? 
# value = dateYMD(value, "value") # min = dateYMD(min, "min") # max = dateYMD(max, "max") # datesdisabled = dateYMD(datesdisabled, "datesdisabled") return div( shiny_input_label(id, label), date_input_tag( id=id, value=value, min=min, max=max, format=format, startview=startview, weekstart=weekstart, language=language, autoclose=autoclose, data_date_dates_disabled=json.dumps(datesdisabled), data_date_days_of_week_disabled=json.dumps(daysofweekdisabled), ), id=id, class_="shiny-date-input form-group shiny-input-container", style=css(width=width), ) def input_date_range( id: str, label: TagChildArg, start: Optional[date] = None, end: Optional[date] = None, min: Optional[date] = None, max: Optional[date] = None, format: str = "yyyy-mm-dd", startview: str = "month", weekstart: int = 0, language: str = "en", separator: str = " to ", width: Optional[str] = None, autoclose: bool = True, ) -> Tag: # TODO: needed? # start = dateYMD(start, "start") # end = dateYMD(end, "end") # min = dateYMD(min, "min") # max = dateYMD(max, "max") return div( shiny_input_label(id, label), div( date_input_tag( id=id, value=start, min=min, max=max, format=format, startview=startview, weekstart=weekstart, language=language, autoclose=autoclose, ), # input-group-prepend and input-group-append are for bootstrap 4 forward compat span( span(separator, class_="input-group-text"), class_="input-group-addon input-group-prepend input-group-append", ), date_input_tag( id=id, value=end, min=min, max=max, format=format, startview=startview, weekstart=weekstart, language=language, autoclose=autoclose, ), # input-daterange class is needed for dropdown behavior class_="input-daterange input-group input-group-sm", ), id=id, class_="shiny-date-range-input form-group shiny-input-container", style=css(width=width), ) def date_input_tag( id: str, value: Optional[date], min: Optional[date], max: Optional[date], format: str, startview: str, weekstart: int, language: str, autoclose: bool, **kwargs: TagAttrArg, ): return tags.input( datepicker_deps(), type="text", class_="form-control", # `aria-labelledby` attribute is required for accessibility to avoid doubled labels (#2951). aria_labelledby=id + "-label", # title attribute is announced for screen readers for date format. 
title="Date format: " + format, data_date_language=language, data_date_week_start=weekstart, data_date_format=format, data_date_start_view=startview, data_min_date=min, data_max_date=max, data_initial_date=value, data_date_autoclose="true" if autoclose else "false", **kwargs, ) shiny/input_check_radio.py METASEP from typing import Optional, Union, List, Dict from htmltools import tags, Tag, div, span, css, TagChildArg from .input_utils import shiny_input_label def input_checkbox( id: str, label: TagChildArg, value: bool = False, width: Optional[str] = None ) -> Tag: return div( div( tags.label( tags.input( id=id, type="checkbox", checked="checked" if value else None ), span(label), ), class_="checkbox", ), class_="form-group shiny-input-container", style=css(width=width), ) choicesType = Union[Dict[str, str], List[str]] def input_checkbox_group( id: str, label: TagChildArg, choices: choicesType, choice_names: Optional[List[str]] = None, selected: Optional[str] = None, inline: bool = False, width: Optional[str] = None, ) -> Tag: input_label = shiny_input_label(id, label) options = generate_options( id=id, type="checkbox", choices=choices, choice_names=choice_names, selected=selected, inline=inline, ) return div( input_label, options, id=id, style=css(width=width), class_="form-group shiny-input-checkboxgroup shiny-input-container" + (" shiny-input-container-inline" if inline else ""), # https://www.w3.org/TR/wai-aria-practices/examples/checkbox/checkbox-1/checkbox-1.html role="group", aria_labelledby=input_label.attrs.get("id"), ) def input_radio_buttons( id: str, label: TagChildArg, choices: choicesType, choice_names: Optional[List[str]] = None, selected: Optional[str] = None, inline: bool = False, width: Optional[str] = None, ) -> Tag: input_label = shiny_input_label(id, label) options = generate_options( id=id, type="radio", choices=choices, choice_names=choice_names, selected=selected, inline=inline, ) return div( input_label, options, id=id, style=css(width=width), class_="form-group shiny-input-radiogroup shiny-input-container" + (" shiny-input-container-inline" if inline else ""), # https://www.w3.org/TR/2017/WD-wai-aria-practices-1.1-20170628/examples/radio/radio-1/radio-1.html role="radiogroup", aria_labelledby=input_label.attrs.get("id"), ) def generate_options( id: str, type: str, choices: choicesType, choice_names: Optional[List[str]], selected: Optional[str], inline: bool, ): if not choice_names: choice_names = list(choices.keys()) if isinstance(choices, dict) else choices choices = [v for k, v in choices.items()] if isinstance(choices, dict) else choices if type == "radio" and not selected: selected = choices[0] return div( *[ generate_option(id, type, choices[i], choice_names[i], selected, inline) for i in range(len(choices)) ], class_="shiny-options-group", ) def generate_option( id: str, type: str, choice: str, choice_name: str, selected: Optional[str], inline: bool, ): input = tags.input( type=type, name=id, value=choice, checked="checked" if selected == choice else None, ) if inline: return tags.label(input, span(choice_name), class_=type + "-inline") else: return div(tags.label(input, span(choice_name)), class_=type) shiny/input_button.py METASEP from typing import Optional from htmltools import tags, Tag, TagChildArg, TagAttrArg, css def input_button( id: str, label: TagChildArg, icon: TagChildArg = None, width: Optional[str] = None, **kwargs: TagAttrArg, ) -> Tag: return tags.button( icon, label, id=id, type="button", class_="btn btn-default action-button", 
style=css(width=width), **kwargs, ) def input_link( id: str, label: TagChildArg, icon: TagChildArg = None, **kwargs: TagAttrArg, ) -> Tag: return tags.a(icon, label, id=id, href="#", class_="action-button", **kwargs) shiny/http_staticfiles.py METASEP """ We can't use starlette's StaticFiles when running in wasm mode, because it launches a thread. Instead, use our own crappy version. Fortunately, this is all we need. When running in native Python mode, use the starlette StaticFiles impl; it's battle tested, whereas ours is not. Under wasm, it's OK if ours has bugs, even security holes: everything is running in the browser sandbox including the filesystem, so there's nothing we could disclose that an attacker wouldn't already have access to. The same is not true when running in native Python, we want to be as safe as possible. """ import sys if "pyodide" not in sys.modules: # Running in native mode; use starlette StaticFiles import starlette.staticfiles StaticFiles = starlette.staticfiles.StaticFiles # type: ignore else: # Running in wasm mode; must use our own simple StaticFiles from typing import Optional, Tuple, MutableMapping, Iterable from starlette.types import Scope, Receive, Send from starlette.responses import PlainTextResponse import os import os.path import mimetypes import pathlib import urllib.parse class StaticFiles: dir: pathlib.Path root_path: str def __init__(self, directory: str): self.dir = pathlib.Path(os.path.realpath(os.path.normpath(directory))) async def __call__(self, scope: Scope, receive: Receive, send: Send): if scope["type"] != "http": raise AssertionError("StaticFiles can't handle non-http request") path = scope["path"] path_segments = path.split("/") final_path, trailing_slash = traverse_url_path(self.dir, path_segments) if final_path is None: return await Error404()(scope, receive, send) if not final_path.exists(): return await Error404()(scope, receive, send) # Sanity check that final path is under self.dir, and if not, 404 if not final_path.is_relative_to(self.dir): return await Error404()(scope, receive, send) # Serve up the path if final_path.is_dir(): if trailing_slash: # We could serve up index.html or directory listing if we wanted return await Error404()(scope, receive, send) else: # We could redirect with an added "/" if we wanted return await Error404()(scope, receive, send) else: return await FileResponse(final_path)(scope, receive, send) def traverse_url_path( dir: pathlib.Path[str], path_segments: list[str] ) -> Tuple[Optional[pathlib.Path[str]], bool]: assert len(path_segments) > 0 new_dir = dir path_segment = urllib.parse.unquote(path_segments.pop(0)) # Gratuitous whitespace is not allowed if path_segment != path_segment.strip(): return None, False # Check for illegal paths if "/" in path_segment: return None, False elif path_segment == ".." 
or path_segment == ".": return None, False if path_segment != "": new_dir = dir / path_segment if len(path_segments) == 0: return new_dir, path_segment == "" else: return traverse_url_path(new_dir, path_segments) class Error404(PlainTextResponse): def __init__(self): super().__init__("404", status_code=404) # type: ignore class FileResponse: file: os.PathLike[str] headers: Optional[MutableMapping[str, str]] media_type: str def __init__( self, file: os.PathLike[str], headers: Optional[MutableMapping[str, str]] = None, media_type: Optional[str] = None, ) -> None: self.headers = headers self.file = file if media_type is None: media_type, _ = mimetypes.guess_type(file, strict=False) if media_type is None: media_type = "application/octet-stream" self.media_type = media_type async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: await send( { "type": "http.response.start", "status": 200, "headers": convert_headers(self.headers, self.media_type), } ) with open(self.file, "rb") as f: data = f.read() await send( {"type": "http.response.body", "body": data, "more_body": False} ) def convert_headers( headers: Optional[MutableMapping[str, str]], media_type: Optional[str] = None ) -> Iterable[Tuple[bytes, bytes]]: if headers is None: headers = {} header_list = [ (k.encode("latin-1"), v.encode("latin-1")) for k, v in headers.items() ] if media_type is not None: header_list += [ ( b"Content-Type", media_type.encode("latin-1"), ) ] return header_list shiny/html_dependencies.py METASEP from htmltools import HTMLDependency, HTML from typing import List, Union def shiny_deps() -> HTMLDependency: return HTMLDependency( name="shiny", version="0.0.1", source={"package": "shiny", "subdir": "www/shared/"}, script={"src": "shiny.js"}, stylesheet={"href": "shiny.min.css"}, ) def bootstrap_deps(bs3compat: bool = True) -> List[HTMLDependency]: dep = HTMLDependency( name="bootstrap", version="5.0.1", source={"package": "shiny", "subdir": "www/shared/bootstrap/"}, script={"src": "bootstrap.bundle.min.js"}, stylesheet={"href": "bootstrap.min.css"}, ) deps = [jquery_deps(), dep] if bs3compat: deps.append(bs3compat_deps()) return deps # TODO: if we want to support glyphicons we'll need to bundle font files, too def bs3compat_deps() -> HTMLDependency: return HTMLDependency( name="bs3-compat", version="1.0", source={"package": "shiny", "subdir": "www/shared/bs3compat/"}, script=[{"src": "transition.js"}, {"src": "tabs.js"}, {"src": "bs3compat.js"}], ) def jquery_deps() -> HTMLDependency: return HTMLDependency( name="jquery", version="3.6.0", source={"package": "shiny", "subdir": "www/shared/jquery/"}, script={"src": "jquery-3.6.0.min.js"}, ) def nav_deps( include_bootstrap: bool = True, ) -> Union[HTMLDependency, List[HTMLDependency]]: dep = HTMLDependency( name="bslib-navs", version="1.0", source={"package": "shiny", "subdir": "www/shared/bslib/dist/"}, script={"src": "navs.min.js"}, ) return [dep, *bootstrap_deps()] if include_bootstrap else dep def ionrangeslider_deps() -> List[HTMLDependency]: return [ HTMLDependency( name="ionrangeslider", version="2.3.1", source={"package": "shiny", "subdir": "www/shared/ionrangeslider/"}, script={"src": "js/ion.rangeSlider.min.js"}, stylesheet={"href": "css/ion.rangeSlider.css"}, ), HTMLDependency( name="strftime", version="0.9.2", source={"package": "shiny", "subdir": "www/shared/strftime/"}, script={"src": "strftime-min.js"}, ), ] def datepicker_deps() -> HTMLDependency: return HTMLDependency( name="bootstrap-datepicker", version="1.9.0", source={"package": 
"shiny", "subdir": "www/shared/datepicker/"}, # TODO: pre-compile the Bootstrap 5 version? stylesheet={"href": "css/bootstrap-datepicker3.min.css"}, script={"src": "js/bootstrap-datepicker.min.js"}, # Need to enable noConflict mode. See #1346. head=HTML( "<script>(function() { var datepicker = $.fn.datepicker.noConflict(); $.fn.bsDatepicker = datepicker; })();</script>" ), ) def selectize_deps() -> HTMLDependency: return HTMLDependency( name="selectize", version="0.12.6", source={"package": "shiny", "subdir": "www/shared/selectize/"}, script=[ {"src": "js/selectize.min.js"}, {"src": "accessibility/js/selectize-plugin-a11y.min.js"}, ], # TODO: pre-compile the Bootstrap 5 version? stylesheet={"href": "css/selectize.bootstrap3.css"}, ) def jqui_deps() -> HTMLDependency: return HTMLDependency( name="jquery-ui", version="1.12.1", source={"package": "shiny", "subdir": "www/shared/jqueryui/"}, script={"src": "jquery-ui.min.js"}, stylesheet={"href": "jquery-ui.min.css"}, ) shiny/fileupload.py METASEP import sys from typing import Optional, BinaryIO, List import typing import tempfile import os import copy import shutil import pathlib if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict from . import utils # File uploads happen through a series of requests. This requires a browser # which supports the HTML5 File API. # # 1. Client tells server that one or more files are about to be uploaded, with # an "uploadInit" message; the server responds with a "jobId" and "uploadUrl" # that the client should use to upload the files. From the server's # perspective, the messages look like this: # RECV {"method":"uploadInit","args":[[{"name":"mtcars.csv","size":1303,"type":"text/csv"}]],"tag":2} # SEND {"response":{"tag":2,"value":{"jobId":"1651ddebfb643a26e6f18aa1","uploadUrl":"session/3cdbe3c4d1318225fee8f2e3417a1c99/upload/1651ddebfb643a26e6f18aa1?w="}}} # # 2. For each file (sequentially): # b. Client makes a POST request with the file data. # c. Server sends a 200 response to the client. # # 3. Repeat 2 until all files have been uploaded. # # 4. Client tells server that all files have been uploaded, along with the # input ID that this data should be associated with. The server responds # with the tag ID and a null message. The messages look like this: # RECV {"method":"uploadEnd","args":["1651ddebfb643a26e6f18aa1","file1"],"tag":3} # SEND {"response":{"tag":3,"value":null}} # Information about a single file, with a structure like: # {'name': 'mtcars.csv', 'size': 1303, 'type': 'text/csv', 'datapath: '/...../mtcars.csv'} # The incoming data doesn't include 'datapath'; that field is added by the # FileUploadOperation class. class FileInfo(TypedDict): name: str size: int type: str datapath: str class FileUploadOperation: def __init__( self, parent: "FileUploadManager", id: str, dir: str, file_infos: List[FileInfo] ) -> None: self._parent: FileUploadManager = parent self._id: str = id self._dir: str = dir # Copy file_infos and add a "datapath" entry for each file. self._file_infos: list[FileInfo] = [ typing.cast(FileInfo, {**fi, "datapath": ""}) for fi in copy.deepcopy(file_infos) ] self._n_uploaded: int = 0 self._current_file_obj: Optional[BinaryIO] = None # Start uploading one of the files. 
def file_begin(self) -> None: file_info: FileInfo = self._file_infos[self._n_uploaded] file_ext = pathlib.Path(file_info["name"]).suffix file_info["datapath"] = os.path.join( self._dir, str(self._n_uploaded) + file_ext ) self._current_file_obj = open(file_info["datapath"], "ab") # Finish uploading one of the files. def file_end(self) -> None: if self._current_file_obj is not None: self._current_file_obj.close() self._current_file_obj = None self._n_uploaded += 1 # Write a chunk of data for the currently-open file. def write_chunk(self, chunk: bytes) -> None: if self._current_file_obj is None: raise RuntimeError(f"FileUploadOperation for {self._id} is not open.") self._current_file_obj.write(chunk) # End the entire operation, which can consist of multiple files. def finish(self) -> List[FileInfo]: if self._n_uploaded != len(self._file_infos): raise RuntimeError( f"Not all files for FileUploadOperation {self._id} were uploaded." ) self._parent.on_job_finished(self._id) return self._file_infos # Context handlers for `with` def __enter__(self) -> None: self.file_begin() def __exit__(self, type, value, trace) -> None: # type: ignore self.file_end() class FileUploadManager: def __init__(self) -> None: # TODO: Remove basedir when app exits. self._basedir: str = tempfile.mkdtemp(prefix="fileupload-") self._operations: dict[str, FileUploadOperation] = {} def create_upload_operation(self, file_infos: List[FileInfo]) -> str: job_id = utils.rand_hex(12) dir = tempfile.mkdtemp(dir=self._basedir) self._operations[job_id] = FileUploadOperation(self, job_id, dir, file_infos) return job_id def get_upload_operation(self, id: str) -> Optional[FileUploadOperation]: if id in self._operations: return self._operations[id] else: return None def on_job_finished(self, job_id: str) -> None: del self._operations[job_id] # Remove the directories containing file uploads; this is to be called when # a session ends. def rm_upload_dir(self) -> None: shutil.rmtree(self._basedir) shiny/datastructures.py METASEP from typing import TypeVar, Generic from queue import PriorityQueue T = TypeVar("T") class PriorityQueueFIFO(Generic[T]): """ Similar to queue.PriorityQueue, except that if two elements have the same priority, they are returned in the order they were inserted. Also, the item is kept separate from the priority value (with PriorityQueue, the priority is part of the item). """ def __init__(self) -> None: self._pq: PriorityQueue[tuple[int, int, T]] = PriorityQueue() self._counter: int = 0 def put(self, priority: int, item: T) -> None: """ Add an item to the queue. Parameters: priority (int): The priority of the item. Higher priority items will come out of the queue before lower priority items. item (T): The item to put in the queue. """ self._counter += 1 self._pq.put((-priority, self._counter, item)) def get(self) -> T: return self._pq.get()[2] def empty(self) -> bool: return self._pq.empty() shiny/connmanager.py METASEP from abc import ABC, abstractmethod from typing import Optional import starlette.websockets from starlette.websockets import WebSocketState class Connection(ABC): """Abstract class to serve a session and send/receive messages to the client.""" @abstractmethod async def send(self, message: str) -> None: ... @abstractmethod async def receive(self) -> str: ... @abstractmethod async def close(self, code: int, reason: Optional[str]) -> None: ... 
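# As a hedged illustration of how the Connection contract above can be
# satisfied without a real websocket (e.g. in unit tests), the sketch below
# backs send()/receive() with in-memory asyncio queues. The name
# "MockConnection" and its put_client_message() helper are hypothetical and
# not part of this package.
import asyncio


class MockConnection(Connection):
    def __init__(self) -> None:
        # Messages the server has sent to the (pretend) client.
        self._to_client: "asyncio.Queue[str]" = asyncio.Queue()
        # Messages the (pretend) client has sent to the server.
        self._from_client: "asyncio.Queue[str]" = asyncio.Queue()
        self._closed: bool = False

    async def send(self, message: str) -> None:
        if not self._closed:
            await self._to_client.put(message)

    async def receive(self) -> str:
        if self._closed:
            raise ConnectionClosed()
        return await self._from_client.get()

    async def close(self, code: int, reason: Optional[str]) -> None:
        self._closed = True

    # Test helper: pretend the client sent `message`.
    def put_client_message(self, message: str) -> None:
        self._from_client.put_nowait(message)
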
class StarletteConnection(Connection): def __init__(self, conn: starlette.websockets.WebSocket): self.conn: starlette.websockets.WebSocket = conn async def accept(self, subprotocol: Optional[str] = None): await self.conn.accept(subprotocol) # type: ignore async def send(self, message: str) -> None: if self._is_closed(): return await self.conn.send_text(message) async def receive(self) -> str: if self._is_closed(): raise ConnectionClosed() try: return await self.conn.receive_text() except starlette.websockets.WebSocketDisconnect: raise ConnectionClosed() async def close(self, code: int, reason: Optional[str]) -> None: if self._is_closed(): return await self.conn.close(code) def _is_closed(self) -> bool: return ( self.conn.application_state == WebSocketState.DISCONNECTED # type: ignore or self.conn.client_state == WebSocketState.DISCONNECTED # type: ignore ) class ConnectionClosed(Exception): """Raised when a Connection is closed from the other side.""" pass shiny/bootstrap.py METASEP import sys from typing import Callable, Optional if sys.version_info >= (3, 8): from typing import Literal else: from typing_extensions import Literal from htmltools import ( TagChildArg, TagAttrArg, TagList, Tag, div, tags, h2, css, span, HTML, ) from .html_dependencies import jqui_deps def row(*args: TagChildArg, **kwargs: TagAttrArg) -> Tag: return div(*args, class_="row", **kwargs) def column( width: int, *args: TagChildArg, offset: int = 0, **kwargs: TagAttrArg ) -> Tag: if width < 1 or width > 12: raise ValueError("Column width must be between 1 and 12") cls = "col-sm-" + str(width) if offset > 0: # offset-md-x is for bootstrap 4 forward compat # (every size tier has been bumped up one level) # https://github.com/twbs/bootstrap/blob/74b8fe7/docs/4.3/migration/index.html#L659 off = str(offset) cls += f" offset-md-{off} col-sm-offset-{off}" return div(*args, class_=cls, **kwargs) # TODO: also accept a generic list (and wrap in panel in that case) def layout_sidebar( sidebar: TagChildArg, main: TagChildArg, position: Literal["left", "right"] = "left" ) -> Tag: return row(sidebar, main) if position == "left" else row(main, sidebar) def panel_well(*args: TagChildArg, **kwargs: TagAttrArg) -> Tag: return div(*args, class_="well", **kwargs) def panel_sidebar(*args: TagChildArg, width: int = 4, **kwargs: TagAttrArg) -> Tag: return div( # A11y semantic landmark for sidebar tags.form(*args, role="complementary", class_="well", **kwargs), class_="col-sm-" + str(width), ) def panel_main(*args: TagChildArg, width: int = 8, **kwargs: TagAttrArg): return div( # A11y semantic landmark for main region *args, role="main", class_="col-sm-" + str(width), **kwargs, ) # TODO: replace `flowLayout()`/`splitLayout()` with a flexbox wrapper? # def panel_input(*args: TagChild, **kwargs: TagAttr): # return div(flowLayout(...), class_="shiny-input-panel") def panel_conditional( condition: str, *args: TagChildArg, # TODO: do we have an answer for shiny::NS() yet? 
ns: Callable[[str], str] = lambda x: x, **kwargs: TagAttrArg, ): return div(*args, data_display_if=condition, data_ns_prefix=ns(""), **kwargs) def panel_title(title: str, windowTitle: Optional[str] = None) -> TagList: if windowTitle is None: windowTitle = title return TagList( tags.head(tags.title(windowTitle)), h2(title), ) def panel_fixed(*args: TagChildArg, **kwargs: TagAttrArg) -> TagList: return panel_absolute(*args, fixed=True, **kwargs) def panel_absolute( *args: TagChildArg, top: Optional[str] = None, left: Optional[str] = None, right: Optional[str] = None, bottom: Optional[str] = None, width: Optional[str] = None, height: Optional[str] = None, draggable: bool = False, fixed: bool = False, cursor: Literal["auto", "move", "default", "inherit"] = "auto", **kwargs: TagAttrArg, ) -> TagList: style = css( top=top, left=left, right=right, bottom=bottom, width=width, height=height, position="fixed" if fixed else "absolute", cursor="move" if draggable else "inherit" if cursor == "auto" else cursor, ) divTag = div(*args, style=style, **kwargs) if not draggable: return TagList(divTag) divTag.add_class("draggable") deps = jqui_deps() deps.stylesheet = [] return TagList(deps, divTag, tags.script(HTML('$(".draggable").draggable();'))) def help_text(*args: TagChildArg, **kwargs: TagAttrArg) -> Tag: return span(*args, class_="help-block", **kwargs) shiny/__init__.py METASEP """Top-level package for Shiny.""" __author__ = """Winston Chang""" __email__ = "[email protected]" __version__ = "0.0.0.9000" # All objects imported into this scope will be available as shiny.foo from .bootstrap import * from .input_button import * from .input_check_radio import * from .input_date import * from .input_file import * from .input_numeric import * from .input_password import * from .input_select import * from .input_slider import * from .input_text import * from .modal import * from .navs import * from .notifications import * from .output import * from .page import * from .progress import * from .render import * from .reactives import * from .shinyapp import * from .shinysession import * from .shinymodule import * examples/simple/app.py METASEP # To run this app: # python3 app.py # Then point web browser to: # http://localhost:8000/ # Add parent directory to path, so we can find the prism module. # (This is just a temporary fix) import os import sys # This will load the shiny module dynamically, without having to install it. # This makes the debug/run cycle quicker. shiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, shiny_module_dir) from shiny import * ui = page_fluid( layout_sidebar( panel_sidebar( input_slider("n", "N", 0, 100, 20), ), panel_main( output_text_verbatim("txt", placeholder=True), output_plot("plot"), ), ), ) # from htmltools.core import HTMLDocument # from shiny import html_dependencies # HTMLDocument(TagList(ui, html_dependencies.shiny_deps())).save_html("temp/app.html") # A ReactiveVal which is exists outside of the session. 
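# Because it is created at module load time (outside of server()), the same
# object is visible to every session that connects to this app.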
shared_val = ReactiveVal(None) def server(session: ShinySession): @reactive() def r(): if session.input["n"] is None: return return session.input["n"] * 2 @session.output("txt") async def _(): val = r() return f"n*2 is {val}, session id is {get_current_session().id}" app = ShinyApp(ui, server) if __name__ == "__main__": app.run() # Alternately, to listen on a TCP port: # app.run(conn_type = "tcp") examples/myapp/app.py METASEP # To run this app: # python3 app.py # Then point web browser to: # http://localhost:8000/ # Add parent directory to path, so we can find the prism module. # (This is just a temporary fix) import os import sys # This will load the shiny module dynamically, without having to install it. # This makes the debug/run cycle quicker. shiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, shiny_module_dir) from shiny import * from shiny.fileupload import FileInfo # For plot rendering import numpy as np import matplotlib.pyplot as plt ui = page_fluid( layout_sidebar( panel_sidebar( input_slider("n", "N", 0, 100, 20), input_file("file1", "Choose file", multiple=True), ), panel_main( output_text_verbatim("txt"), output_text_verbatim("shared_txt"), output_plot("plot"), output_text_verbatim("file_content"), ), ), ) # A ReactiveVal which is shared across all sessions. shared_val = ReactiveVal(None) def server(session: ShinySession): @reactive() def r(): if session.input["n"] is None: return return session.input["n"] * 2 @session.output("txt") async def _(): val = r() return f"n*2 is {val}, session id is {get_current_session().id}" # This observer watches n, and changes shared_val, which is shared across # all running sessions. @observe() def _(): if session.input["n"] is None: return shared_val(session.input["n"] * 10) # Print the value of shared_val(). Changing it in one session should cause # this to run in all sessions. @session.output("shared_txt") def _(): return f"shared_val() is {shared_val()}" @session.output("plot") @render_plot(alt="A histogram") def _(): np.random.seed(19680801) x = 100 + 15 * np.random.randn(437) fig, ax = plt.subplots() ax.hist(x, session.input["n"], density=True) return fig @session.output("file_content") def _(): file_infos: list[FileInfo] = session.input["file1"] if not file_infos: return out_str = "" for file_info in file_infos: out_str += "====== " + file_info["name"] + " ======\n" with open(file_info["datapath"], "r") as f: out_str += f.read() return out_str app = ShinyApp(ui, server) if __name__ == "__main__": app.run() # Alternately, to listen on a TCP port: # app.run(conn_type = "tcp") examples/moduleapp/app.py METASEP # To run this app: # python3 app.py # Then point web browser to: # http://localhost:8000/ # Add parent directory to path, so we can find the prism module. # (This is just a temporary fix) import os import sys # This will load the shiny module dynamically, without having to install it. # This makes the debug/run cycle quicker. 
shiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, shiny_module_dir) from shiny import * # ============================================================================= # Counter module # ============================================================================= def counter_module_ui( ns: Callable[[str], str], label: str = "Increment counter" ) -> TagChildArg: return TagList( input_button(id=ns("button"), label=label), output_text_verbatim(id=ns("out")), ) def counter_module_server(session: ShinySessionProxy): count: ReactiveVal[int] = ReactiveVal(0) @observe() def _(): session.input["button"] isolate(lambda: count(count() + 1)) @session.output("out") def _() -> str: return f"Click count is {count()}" counter_module = ShinyModule(counter_module_ui, counter_module_server) # ============================================================================= # App that uses module # ============================================================================= ui = page_fluid( counter_module.ui("counter1", "Counter 1"), counter_module.ui("counter2", "Counter 2"), ) def server(session: ShinySession): counter_module.server("counter1") counter_module.server("counter2") app = ShinyApp(ui, server) if __name__ == "__main__": app.run() examples/inputs/app.py METASEP # This will load the shiny module dynamically, without having to install it. # This makes the debug/run cycle quicker. import os import sys shiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, shiny_module_dir) from shiny import * from htmltools import tags, HTML from fontawesome import icon_svg ui = page_fluid( panel_title("Hello prism ui"), layout_sidebar( panel_sidebar( input_slider( "n", "input_slider()", min=10, max=100, value=50, step=5, animate=True ), input_date("date", "input_date()"), input_date_range("date_rng", "input_date_range()"), input_text("txt", "input_text()", placeholder="Input some text"), input_text_area( "txt_area", "input_text_area()", placeholder="Input some text" ), input_numeric("num", "input_numeric()", 20), input_password("password", "input_password()"), input_checkbox("checkbox", "input_checkbox()"), input_checkbox_group( "checkbox_group", "input_checkbox_group()", {"Choice 1": "a", "Choice 2": "b"}, selected="b", inline=True, ), input_radio_buttons( "radio", "input_radio()", {"Choice 1": "a", "Choice 2": "b"} ), input_select( "select", "input_select()", { "Choice A": "a", "Group B": {"Choice B1": "b1", "Choice B2": "b2"}, "Group C": ["c1", "c2"], }, ), input_button("button", "input_button()", icon=icon_svg("check")), input_file("file", "File upload"), ), panel_main( output_plot("plot"), navs_tab_card( # TODO: output_plot() within a tab not working? nav("Inputs", output_ui("inputs"), icon=icon_svg("code")), nav( "Image", output_image("image", inline=True), icon=icon_svg("image") ), nav( "Misc", input_link( "link", "Show notification/progress", icon=icon_svg("info") ), tags.br(), input_button("btn", "Show modal", icon=icon_svg("info-circle")), panel_fixed( panel_well( "A fixed, draggable, panel", input_checkbox("checkbox2", "Check me!"), panel_conditional( "input.checkbox2 == true", "Thanks for checking!" 
), ), draggable=True, width="fit-content", height="50px", top="50px", right="50px", ), icon=icon_svg("code"), ), ), ), ), ) import numpy as np import matplotlib.pyplot as plt def server(s: ShinySession): @s.output("inputs") @render_ui() def _() -> Tag: vals = [ f"<code>input_date()</code> {s.input['date']}", f"<code>input_date_range()</code>: {s.input['date_rng']}", f"<code>input_text()</code>: {s.input['txt']}", f"<code>input_text_area()</code>: {s.input['txt_area']}", f"<code>input_numeric()</code>: {s.input['num']}", f"<code>input_password()</code>: {s.input['password']}", f"<code>input_checkbox()</code>: {s.input['checkbox']}", f"<code>input_checkbox_group()</code>: {s.input['checkbox_group']}", f"<code>input_radio()</code>: {s.input['radio']}", f"<code>input_select()</code>: {s.input['select']}", f"<code>input_button()</code>: {s.input['button']}", ] return tags.pre(HTML("\n".join(vals))) np.random.seed(19680801) x_rand = 100 + 15 * np.random.randn(437) @s.output("plot") @render_plot(alt="A histogram") def _(): fig, ax = plt.subplots() ax.hist(x_rand, int(s.input["n"]), density=True) return fig @s.output("image") @render_image() def _(): from pathlib import Path dir = Path(__file__).resolve().parent return {"src": dir / "rstudio-logo.png", "width": "150px"} @observe() def _(): btn = s.input["btn"] if btn and btn > 0: modal_show(modal("Hello there!", easy_close=True)) @observe() def _(): link = s.input["link"] if link and link > 0: notification_show("A notification!") p = Progress() import time for i in range(30): p.set(i / 30, message="Computing") time.sleep(0.1) p.close() app = ShinyApp(ui, server) if __name__ == "__main__": app.run() examples/dynamic_ui/app.py METASEP # To run this app: # python3 app.py # Then point web browser to: # http://localhost:8000/ # Add parent directory to path, so we can find the prism module. # (This is just a temporary fix) import os import sys # This will load the shiny module dynamically, without having to install it. # This makes the debug/run cycle quicker. 
shiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, shiny_module_dir) from shiny import * # For plot rendering import numpy as np import matplotlib.pyplot as plt ui = page_fluid( layout_sidebar( panel_sidebar(h2("Dynamic UI"), output_ui("ui")), panel_main( output_text_verbatim("txt"), output_plot("plot"), ), ), ) def server(session: ShinySession): @reactive() def r(): if session.input["n"] is None: return return session.input["n"] * 2 @session.output("txt") async def _(): val = r() return f"n*2 is {val}, session id is {get_current_session().id}" @session.output("plot") @render_plot(alt="A histogram") def _(): np.random.seed(19680801) x = 100 + 15 * np.random.randn(437) fig, ax = plt.subplots() ax.hist(x, session.input["n"], density=True) return fig @session.output("ui") @render_ui() def _(): return input_slider("n", "N", 0, 100, 20) app = ShinyApp(ui, server) if __name__ == "__main__": app.run() # Alternately, to listen on a TCP port: # app.run(conn_type = "tcp") setup.py METASEP #!/usr/bin/env python """The setup script.""" from setuptools import setup, find_packages requirements = [] test_requirements = [ "pytest>=3", ] setup( author="Winston Chang", author_email="[email protected]", python_requires=">=3.7", classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], description="A web development framework for Python.", install_requires=requirements, license="GNU General Public License v3", include_package_data=True, keywords="shiny", name="shiny", packages=find_packages(include=["shiny", "shiny.*"]), package_data={ "shiny": ["py.typed"], }, test_suite="tests", tests_require=test_requirements, url="https://github.com/rstudio/prism", version="0.0.0.9000", zip_safe=False, ) examples/update-input/app.py METASEP
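The project context above concatenates repository files, each introduced by a "path METASEP" header line, and the file_context entries that follow hold chunks of examples/update-input/app.py labelled by type (infile, inproject, common, non_informative, random). A minimal sketch, assuming only that METASEP header convention, of how such a context string could be split back into (path, source) pairs; parse_project_context is a hypothetical helper name, not part of the dataset or of shiny:

# Minimal sketch: split a METASEP-delimited project context back into
# (path, source) pairs. Assumes each file is introduced by a line that
# ends with " METASEP"; parse_project_context is a hypothetical helper.
from typing import List, Tuple


def parse_project_context(context: str) -> List[Tuple[str, str]]:
    files: List[Tuple[str, str]] = []
    path = None
    body: List[str] = []
    for line in context.splitlines():
        if line.endswith(" METASEP"):
            # New file header: flush the previous file, if any.
            if path is not None:
                files.append((path, "\n".join(body)))
            path = line[: -len(" METASEP")].strip()
            body = []
        else:
            body.append(line)
    if path is not None:
        files.append((path, "\n".join(body)))
    return files

Used on the context above, the last pair would be ("examples/update-input/app.py", "") since that header closes the listing with no body following it.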
[ { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):\n @observe_async()", "type": "infile" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# 
Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),", "type": "inproject" }, { "content": "# To run this app:\n# python3 
app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This 
controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),", "type": "inproject" 
}, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary 
fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 
2\", \"option2\"),\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):\n @observe_async()\n async def _():\n # We'll use these multiple times, so use short var names for\n # convenience.\n c_label = sess.input[\"control_label\"]\n c_num = sess.input[\"control_num\"]\n\n print(c_label)\n print(c_num)\n\n # Text =====================================================\n # Change both the label and the text\n await update_text(\n \"inText\",\n label=\"New \" + c_label,\n value=\"New text \" + str(c_num),\n )\n\n # Number ===================================================\n # Change the value\n await update_numeric(\"inNumber\", value=c_num)\n\n # Change the label, value, min, and max\n await update_numeric(\n \"inNumber2\",\n label=\"Number \" + c_label,\n value=c_num,\n min=c_num - 10,\n max=c_num + 10,\n step=5,\n )\n\n # Slider input =============================================\n # Only label and value can be set for slider\n await update_slider(\"inSlider\", label=\"Slider \" + c_label, value=c_num)\n\n # Slider range input =======================================\n # For sliders that pick out a range, pass in a vector of 2\n # values.\n await update_slider(\"inSlider2\", 
value=(c_num - 1, c_num + 1))\n\n # TODO: an NA means to not change that value (the low or high one)\n # await update_slider(\n # \"inSlider3\",\n # value=(NA, c_num+2)\n # )\n\n # Date input ===============================================\n # Only label and value can be set for date input\n await update_date(\"inDate\", label=\"Date \" + c_label, value=date(2013, 4, c_num))\n\n # Date range input =========================================\n # Only label and value can be set for date range input\n await update_date_range(\n \"inDateRange\",\n label=\"Date range \" + c_label,\n start=date(2013, 1, c_num),\n end=date(2013, 12, c_num),\n min=date(2001, 1, c_num),\n max=date(2030, 1, c_num),\n )\n\n # # Checkbox ===============================================\n await update_checkbox(\"inCheckbox\", value=c_num % 2)\n\n # Checkbox group ===========================================\n # Create a list of new options, where the name of the items\n # is something like 'option label x A', and the values are\n # 'option-x-A'.\n opts = zip(\n [f\"option label {c_num} {type}\" for type in [\"A\", \"B\"]],\n [f\"option-{c_num}-{type}\" for type in [\"A\", \"B\"]],\n )\n\n # Set the label, choices, and selected item\n await update_checkbox_group(\n \"inCheckboxGroup\",\n label=\"Checkbox group \" + c_label,\n choices=tuple(opts),\n selected=f\"option-{c_num}-A\",\n )\n\n # Radio group ==============================================\n await update_radio_buttons(\n \"inRadio\",\n label=\"Radio \" + c_label,\n choices=tuple(opts),\n selected=f\"option-{c_num}-A\",\n )\n # Select input =============================================\n # Create a list of new options, where the name of the items\n # is something like 'option label x A', and the values are\n # 'option-x-A'.\n await update_select(\n \"inSelect\",\n label=\"Select \" + c_label,\n choices=dict(opts),\n selected=f\"option-{c_num}-A\",\n )\n\n # Can also set the label and select an item (or more than\n # one if it's a multi-select)\n await update_select(\n \"inSelect2\",\n label=\"Select label \" + c_label,\n choices=dict(opts),\n selected=f\"option-{c_num}-B\",\n )\n\n # Tabset input =============================================\n # Change the selected tab.\n # The tabsetPanel must have been created with an 'id' argument\n await nav_select(\"inTabset\", selected=\"panel2\" if c_num % 2 else \"panel1\")\n\n", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n 
input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# 
This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date 
input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):", "type": "inproject" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):\n @observe_async()\n async def _():\n # We'll use these multiple times, so use short var names for\n # convenience.\n c_label = sess.input[\"control_label\"]\n c_num = sess.input[\"control_num\"]\n\n print(c_label)\n print(c_num)\n\n # Text 
=====================================================\n # Change both the label and the text\n await update_text(\n \"inText\",\n label=\"New \" + c_label,\n value=\"New text \" + str(c_num),\n )\n\n # Number ===================================================\n # Change the value\n await update_numeric(\"inNumber\", value=c_num)\n\n # Change the label, value, min, and max\n await update_numeric(\n \"inNumber2\",\n label=\"Number \" + c_label,\n value=c_num,\n min=c_num - 10,\n max=c_num + 10,\n step=5,\n )\n\n # Slider input =============================================\n # Only label and value can be set for slider\n await update_slider(\"inSlider\", label=\"Slider \" + c_label, value=c_num)\n\n # Slider range input =======================================\n # For sliders that pick out a range, pass in a vector of 2\n # values.\n await update_slider(\"inSlider2\", value=(c_num - 1, c_num + 1))\n\n # TODO: an NA means to not change that value (the low or high one)\n # await update_slider(\n # \"inSlider3\",\n # value=(NA, c_num+2)\n # )\n\n # Date input ===============================================\n # Only label and value can be set for date input\n await update_date(\"inDate\", label=\"Date \" + c_label, value=date(2013, 4, c_num))\n\n # Date range input =========================================\n # Only label and value can be set for date range input\n await update_date_range(\n \"inDateRange\",\n label=\"Date range \" + c_label,\n start=date(2013, 1, c_num),\n end=date(2013, 12, c_num),\n min=date(2001, 1, c_num),\n max=date(2030, 1, c_num),\n )\n\n # # Checkbox ===============================================\n await update_checkbox(\"inCheckbox\", value=c_num % 2)\n\n # Checkbox group ===========================================\n # Create a list of new options, where the name of the items\n # is something like 'option label x A', and the values are\n # 'option-x-A'.\n opts = zip(\n [f\"option label {c_num} {type}\" for type in [\"A\", \"B\"]],\n [f\"option-{c_num}-{type}\" for type in [\"A\", \"B\"]],\n )\n\n # Set the label, choices, and selected item\n await update_checkbox_group(\n \"inCheckboxGroup\",\n label=\"Checkbox group \" + c_label,\n choices=tuple(opts),\n selected=f\"option-{c_num}-A\",\n )\n\n # Radio group ==============================================\n await update_radio_buttons(\n \"inRadio\",\n label=\"Radio \" + c_label,\n choices=tuple(opts),\n selected=f\"option-{c_num}-A\",\n )\n # Select input =============================================\n # Create a list of new options, where the name of the items\n # is something like 'option label x A', and the values are\n # 'option-x-A'.\n await update_select(\n \"inSelect\",\n label=\"Select \" + c_label,\n choices=dict(opts),\n selected=f\"option-{c_num}-A\",\n )\n\n # Can also set the label and select an item (or more than\n # one if it's a multi-select)\n await update_select(\n \"inSelect2\",\n label=\"Select label \" + c_label,\n choices=dict(opts),\n selected=f\"option-{c_num}-B\",\n )\n\n # Tabset input =============================================\n # Change the selected tab.\n # The tabsetPanel must have been created with an 'id' argument\n await nav_select(\"inTabset\", selected=\"panel2\" if c_num % 2 else \"panel1\")\n\n\napp = ShinyApp(ui, server, debug=True)\n\nif __name__ == \"__main__\":", "type": "common" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a 
temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):\n @observe_async()\n async def _():\n # We'll use these multiple times, so use short var names for\n # convenience.\n c_label = sess.input[\"control_label\"]\n c_num = sess.input[\"control_num\"]\n\n print(c_label)\n print(c_num)\n\n # Text =====================================================\n # Change both the label and the text\n await update_text(\n \"inText\",\n label=\"New \" + c_label,\n value=\"New text \" + str(c_num),\n )\n\n # Number ===================================================\n # Change the value\n await update_numeric(\"inNumber\", value=c_num)\n\n # Change the label, value, min, and max\n await update_numeric(\n \"inNumber2\",\n label=\"Number \" + c_label,\n value=c_num,\n min=c_num - 10,\n max=c_num + 10,\n step=5,\n )\n\n # Slider input =============================================\n # Only label and value can be set for slider\n await update_slider(\"inSlider\", label=\"Slider \" + c_label, value=c_num)\n\n # Slider range input =======================================\n # For sliders that pick out a range, pass in a vector of 2\n # values.\n await update_slider(\"inSlider2\", value=(c_num - 1, c_num + 1))\n\n # TODO: an NA means to not change that value (the low or high one)\n # await update_slider(\n # \"inSlider3\",\n # value=(NA, c_num+2)\n # )\n\n # Date input ===============================================\n # 
Only label and value can be set for date input\n await update_date(\"inDate\", label=\"Date \" + c_label, value=date(2013, 4, c_num))\n\n # Date range input =========================================\n # Only label and value can be set for date range input\n await update_date_range(\n \"inDateRange\",\n label=\"Date range \" + c_label,", "type": "common" }, { "content": "# To run this app:\n# python3 app.py\n\n# Then point web browser to:\n# http://localhost:8000/\n\n# Add parent directory to path, so we can find the prism module.\n# (This is just a temporary fix)\nimport os\nimport sys\n\n# This will load the shiny module dynamically, without having to install it.\n# This makes the debug/run cycle quicker.\nshiny_module_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.insert(0, shiny_module_dir)\n\nfrom shiny import *\n\nui = page_fluid(\n panel_title(\"Changing the values of inputs from the server\"),\n row(\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs control the other inputs on the page\"),\n input_text(\n \"control_label\", \"This controls some of the labels:\", \"LABEL TEXT\"\n ),\n input_slider(\n \"control_num\", \"This controls values:\", min=1, max=20, value=15\n ),\n ),\n ),\n column(\n 3,\n panel_well(\n tags.h4(\"These inputs are controlled by the other inputs\"),\n input_text(\"inText\", \"Text input:\", value=\"start text\"),\n input_numeric(\n \"inNumber\", \"Number input:\", min=1, max=20, value=5, step=0.5\n ),\n input_numeric(\n \"inNumber2\", \"Number input 2:\", min=1, max=20, value=5, step=0.5\n ),\n input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),\n input_slider(\n \"inSlider2\", \"Slider input 2:\", min=1, max=20, value=(5, 15)\n ),\n input_slider(\n \"inSlider3\", \"Slider input 3:\", min=1, max=20, value=(5, 15)\n ),\n input_date(\"inDate\", \"Date input:\"),\n input_date_range(\"inDateRange\", \"Date range input:\"),\n ),\n ),\n column(\n 3,\n panel_well(\n input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),\n input_checkbox_group(\n \"inCheckboxGroup\",\n \"Checkbox group input:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_radio_buttons(\n \"inRadio\",\n \"Radio buttons:\",\n (\"label 1\", \"option1\", \"label 2\", \"option2\"),\n ),\n input_select(\n \"inSelect\",\n \"Select input:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n ),\n input_select(\n \"inSelect2\",\n \"Select input 2:\",\n {\"label 1\": \"option1\", \"label 2\": \"option2\"},\n multiple=True,\n ),\n ),\n navs_tab(\n nav(\"panel1\", h2(\"This is the first panel.\")),\n nav(\"panel2\", h2(\"This is the second panel.\")),\n id=\"inTabset\",\n ),\n ),\n ),\n)\n\n\ndef server(sess: ShinySession):\n @observe_async()\n async def _():\n # We'll use these multiple times, so use short var names for\n # convenience.\n c_label = sess.input[\"control_label\"]\n c_num = sess.input[\"control_num\"]\n\n print(c_label)\n print(c_num)\n\n # Text =====================================================\n # Change both the label and the text\n await update_text(\n \"inText\",\n label=\"New \" + c_label,\n value=\"New text \" + str(c_num),\n )\n\n # Number ===================================================\n # Change the value\n await update_numeric(\"inNumber\", value=c_num)\n\n # Change the label, value, min, and max\n await update_numeric(\n \"inNumber2\",\n label=\"Number \" + c_label,\n value=c_num,\n min=c_num - 10,\n max=c_num + 10,\n step=5,\n )\n\n # Slider input 
=============================================\n        # Only label and value can be set for slider\n        await update_slider(\"inSlider\", label=\"Slider \" + c_label, value=c_num)", "type": "non_informative" } ]
[ " async def _():", "ui = page_fluid(", " panel_title(\"Changing the values of inputs from the server\"),", " row(", " column(", " panel_well(", " input_text(", " input_slider(", " input_text(\"inText\", \"Text input:\", value=\"start text\"),", " input_numeric(", " input_slider(\"inSlider\", \"Slider input:\", min=1, max=20, value=15),", " input_date(\"inDate\", \"Date input:\"),", " input_date_range(\"inDateRange\", \"Date range input:\"),", " input_checkbox(\"inCheckbox\", \"Checkbox input\", value=False),", " input_checkbox_group(", " input_radio_buttons(", " input_select(", "app = ShinyApp(ui, server, debug=True)", " navs_tab(", " nav(\"panel1\", h2(\"This is the first panel.\")),", " nav(\"panel2\", h2(\"This is the second panel.\")),", " @observe_async()", " app.run()", " start=date(2013, 1, c_num),", "", " # Date input ===============================================", " # Change both the label and the text", "# python3 app.py", " ),", " await update_checkbox_group(", " [f\"option label {c_num} {type}\" for type in [\"A\", \"B\"]],", " tags.h4(\"These inputs are controlled by the other inputs\"),", " \"inCheckboxGroup\"," ]
METASEP
56
azure__review-checklists
azure__review-checklists METASEP web/flaskmysql/app.py METASEP #app.py from flask import Flask, request, render_template, jsonify from flaskext.mysql import MySQL #pip install flask-mysql import pymysql import os app = Flask(__name__) # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Open connection mysql = MySQL() app.config['MYSQL_DATABASE_USER'] = mysql_server_username app.config['MYSQL_DATABASE_PASSWORD'] = mysql_server_password app.config['MYSQL_DATABASE_DB'] = 'checklist' app.config['MYSQL_DATABASE_HOST'] = mysql_server_fqdn mysql.init_app(app) @app.route('/') def home(): app.logger.info("DEBUG: Connecting to database...") try: category_filter = request.args.get('category', None) status_filter = request.args.get('status', None) severity_filter = request.args.get('severity', None) except Exception as e: app.logger.info("ERROR reading query parameters for filters: {0}".format(str(e))) pass try: conn = mysql.connect() cursor = conn.cursor(pymysql.cursors.DictCursor) except Exception as e: app.logger.info("ERROR opening cursor to DB connection: {0}".format(str(e))) return jsonify(str(e)) try: sqlquery = "SELECT * from items" filter1_added = False # category filter if category_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "category = '{0}'".format(category_filter) # status filter if status_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "status = '{0}'".format(status_filter) # severity filter if severity_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "severity = '{0}'".format(severity_filter) # send queries app.logger.info ("Retrieving checklist items with query '{0}'".format(sqlquery)) cursor.execute(sqlquery) itemslist = cursor.fetchall() cursor.execute("SELECT DISTINCT category FROM items") categorylist = cursor.fetchall() cursor.execute("SELECT DISTINCT severity FROM items") severitylist = cursor.fetchall() cursor.execute("SELECT DISTINCT status FROM items") statuslist = cursor.fetchall() return render_template('index.html', itemslist=itemslist, categorylist=categorylist, severitylist=severitylist, statuslist=statuslist) except Exception as e: app.logger.info("ERROR sending query: {0}".format(str(e))) return jsonify(str(e)) @app.route("/update",methods=["POST","GET"]) def update(): app.logger.info("Processing {0} with request.form {1}".format(str(request.method), str(request.form))) try: conn = mysql.connect() cursor = conn.cursor(pymysql.cursors.DictCursor) if request.method == 'POST': field = request.form['field'] value = request.form['value'] editid = request.form['id'] app.logger.info("Processing POST for field '{0}', editid '{1}' and value 
'{2}'".format(field, value, editid)) if field == 'comment' and value != '': sql = "UPDATE items SET comments=%s WHERE guid=%s" data = (value, editid) conn = mysql.connect() cursor = conn.cursor() app.logger.info ("Sending SQL query '{0}' with data '{1}'".format(sql, str(data))) cursor.execute(sql, data) conn.commit() elif field == 'status' and value != '': sql = "UPDATE items SET status=%s WHERE guid=%s" data = (value, editid) conn = mysql.connect() cursor = conn.cursor() app.logger.info ("Sending SQL query '{0}' with data '{1}'".format(sql, str(data))) cursor.execute(sql, data) conn.commit() else: app.logger.info ("Field is '{0}', value is '{1}': not doing anything".format(field, value)) success = 1 return jsonify(success) except Exception as e: app.logger.info("Oh oh, there is an error: {0}".format(str(e))) success = 0 return jsonify(success) finally: cursor.close() conn.close() if __name__ == "__main__": app.run(host='0.0.0.0', debug=True) web/fillgraphdb/graph_db.py METASEP import os import sys import pymysql import json import time import requests import azure.mgmt.resourcegraph as arg from datetime import datetime from azure.mgmt.resource import SubscriptionClient from azure.identity import AzureCliCredential from azure.identity import DefaultAzureCredential from azure.identity import ClientSecretCredential # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes (this_value): return str(this_value).replace("'", "\\'") # Function to send an Azure Resource Graph query def get_resources (graph_query, argClient, subsList, argQueryOptions): # TO DO: Authentication should probably happen outside of this function try: # Create query argQuery = arg.models.QueryRequest(subscriptions=subsList, query=graph_query, options=argQueryOptions) # Run query and return results argResults = argClient.resources(argQuery) print("DEBUG: query results: {0}".format(str(argResults))) return argResults except Exception as e: print("ERROR: Error sending Azure Resource Graph query to Azure: {0}".format(str(e))) # sys.exit(0) # Debugging.... Probably this should be exit(1) return '' # Wait for IMDS endpoint to be available try: wait_max_intervals = int(os.environ.get("WAIT_INTERVALS")) print ("DEBUG: WAIT_INTERVALS read from environment variable: {0}".format(str(wait_max_intervals))) except: wait_max_intervals = 5 print ("DEBUG: WAIT_INTERVALS set to default value: {0}".format(str(wait_max_intervals))) wait_interval = 10.0 imds_url = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/' imds_headers = { "Metadata" : "true" } imds_tries = 0 break_loop = False print ('DEBUG: Going into waiting loop to make sure the metadata endpoint is active...') while not break_loop: imds_tries += 1 print ("DEBUG: We are in the loop, pass {0}/{1} ({2}). 
Trying the IMDS endpoint...".format(str(imds_tries), str(wait_max_intervals), str(datetime.now()))) if imds_tries > wait_max_intervals: print("ERROR: max wait intervals exceeded when waiting for IMDS to answer, hopefully you specified some SP credentials as SP variables...") break_loop = True else: print ("DEBUG: Sending GET request to {0}...".format(imds_url)) try: imds_response = requests.get(imds_url, headers=imds_headers, timeout=1) if imds_response.status_code >= 200 and imds_response.status_code <= 299: print ("DEBUG: IMDS endpoint seems to be working, received status code {0} and answer {1}".format(str(imds_response.status_code), str(imds_response.text))) break_loop = True else: print ("DEBUG: IMDS endpoint doesnt seem to be working, received status code {0} and answer {1}".format(str(imds_response.status_code), str(imds_response.text))) except Exception as e: print("DEBUG: Error sending request to IMDS endpoint: {0}".format(str(e))) pass if not break_loop: print("DEBUG: Going to sleep {0} seconds before next try...".format(str(wait_interval))) time.sleep (wait_interval) # Authenticate to Azure, either with Managed Identity or SP print('DEBUG: Authenticating to Azure...') try: print('DEBUG: Getting environment variables...') # credential = AzureCliCredential() # Get your credentials from Azure CLI (development only!) and get your subscription list tenant_id = os.environ.get("AZURE_TENANT_ID") client_id = os.environ.get("AZURE_CLIENT_ID") client_secret = os.environ.get("AZURE_CLIENT_SECRET") except Exception as e: print("ERROR: Error getting environment variables: {0}".format(str(e))) tenant_id = None client_id = None client_secret = None pass try: if tenant_id and client_id and client_secret: print("DEBUG: Service principal credentials (client ID {0}, tenant ID {1}) retrieved from environment variables, trying SP-based authentication now...".format(str(client_id), str(tenant_id))) credential = ClientSecretCredential(tenant_id=tenant_id, client_id=client_id, client_secret=client_secret) else: print('DEBUG: Service principal credentials could not be retrieved from environment variables, trying default authentication method with Managed Identity...') credential = DefaultAzureCredential() # Managed identity except Exception as e: print("ERROR: Error during Azure Authentication: {0}".format(str(e))) sys.exit(1) try: print('DEBUG: Getting subscriptions...') subsClient = SubscriptionClient(credential) subsRaw = [] for sub in subsClient.subscriptions.list(): subsRaw.append(sub.as_dict()) subsList = [] for sub in subsRaw: subsList.append(sub.get('subscription_id')) print ("DEBUG: provided credentials give access to {0} subscription(s)".format(str(len(subsList)))) # Create Azure Resource Graph client and set options print('DEBUG: Creating client object...') argClient = arg.ResourceGraphClient(credential) argQueryOptions = arg.models.QueryRequestOptions(result_format="objectArray") except Exception as e: print("ERROR: Error creating resource graph client object: {0}".format(str(e))) sys.exit(1) # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) else: print("DEBUG: mysql FQDN retrieved from environment variables: '{0}'".format(mysql_server_fqdn)) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please 
define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) else: print("DEBUG: mysql authentication username retrieved from environment variables: '{0}'".format(mysql_server_username)) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) else: print("DEBUG: mysql authentication password retrieved from environment variables: {0}".format("********")) # Create connection to MySQL server and number of records print ("DEBUG: Connecting to '{0}' with username '{1}'...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password) sql_query = "SELECT * FROM {0} WHERE graph_query_success IS NOT null AND graph_query_failure IS NOT null AND graph_query_success != 'None' AND graph_query_failure != 'None';".format (mysql_db_table) cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() row_cnt = 0 if len(rows) > 0: for row in rows: row_cnt += 1 result_text = '' item_guid = row[0] item_success_query = row[10] item_failure_query = row[11] # print ("DEBUG {0}: '{1}', '{2}'".format(item_guid, item_success_query, item_failure_query)) success_resources = str(get_resources(item_success_query, argClient, subsList, argQueryOptions)).replace("'", '"') success_resources = success_resources.replace(': None', ': "None"') # print ("DEBUG: SUCCESS QUERY: {0}".format(success_resources)) if success_resources: try: success_resources_object = json.loads(success_resources) except: print("ERROR: JSON returned from Azure Graph Query not valid: {0}".format(success_resources)) for resource in success_resources_object['data']: if result_text: result_text += '\n' result_text += "SUCCESS: {0}".format(resource["id"]) failure_resources = str(get_resources(item_failure_query, argClient, subsList, argQueryOptions)).replace("'", '"') failure_resources = failure_resources.replace(': None', ': "None"') # print ("DEBUG: FAILURE QUERY: {0}".format(failure_resources)) if failure_resources: try: failure_resources_object = json.loads(failure_resources) except: print("ERROR: JSON returned from Azure Graph Query not valid: {0}".format(failure_resources)) for resource in failure_resources_object['data']: if result_text: result_text += '\n' result_text += "FAILURE: {0}".format(resource["id"]) # print ("DEBUG: Result summary: \n{0}".format(result_text)) if result_text: update_query = "UPDATE items SET graph_query_result = '{0}' WHERE guid = '{1}';".format(result_text, item_guid) print ("DEBUG: sending SQL query '{0}'".format(update_query)) try: cursor.execute(update_query) db.commit() except Exception as e: print("ERROR: Error sending SQL query to MySql server: {0}".format(str(e))) pass else: print("DEBUG: No results could be retrieved for the success and failure queries of checklist item {0}".format(item_guid)) else: row_count = 0 print ("INFO: Processed table {0} in database {1} with {2} records with graph queries. 
Happy review!".format(mysql_db_table, mysql_db_name, str(row_cnt))) # Bye db.close() web/filldb/fill_db.py METASEP import requests import json import os import sys import pymysql # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes(this_value): return str(this_value).replace("'", "\\'") # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Create connection to MySQL server and get version print ("INFO: Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password) sql_query = "SELECT VERSION();" cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() data = "" if len(rows) > 0: for row in rows: if len(data) > 0: data += ', ' data += str(''.join(row)) print ("INFO: Connected to MySQL server {0} with version {1}".format(mysql_server_fqdn, data)) # Delete db if existed sql_query = "DROP DATABASE IF EXISTS {0};".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Create database sql_query = "CREATE DATABASE IF NOT EXISTS {0};".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() sql_query = "USE {0}".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Create table sql_query = """CREATE TABLE {0} ( guid varchar(40), text varchar(1024), description varchar(1024), link varchar(255), training varchar(255), comments varchar(1024), severity varchar(10), status varchar(15), category varchar(255), subcategory varchar(255), graph_query_success varchar(1024), graph_query_failure varchar(1024), graph_query_result varchar(4096) );""".format(mysql_db_table) # print ("DEBUG: Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Download checklist technology = os.environ.get("CHECKLIST_TECHNOLOGY") if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" response = requests.get(checklist_url) # If download was successful if response.status_code == 200: print ("INFO: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_object = json.loads(response.text) except 
Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Get default status from the JSON, default to "Not verified" try: status_list = checklist_object.get("status") default_status = status_list[0].get("name") except: default_status = "Not verified" pass # For each checklist item, add a row to mysql DB row_counter = 0 for item in checklist_object.get("items"): guid = item.get("guid") category = item.get("category") subcategory = item.get("subcategory") text = escape_quotes(item.get("text")) description = escape_quotes(item.get("description")) severity = item.get("severity") link = item.get("link") training = item.get("training") status = default_status graph_query_success = escape_quotes(item.get("graph_success")) graph_query_failure = escape_quotes(item.get("graph_failure")) # print("DEBUG: Adding to table {0}: '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}'".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid)) sql_query = """INSERT INTO {0} (category,subcategory,text,description,severity,link,training,graph_query_success,graph_query_failure,guid,status) VALUES ('{1}','{2}','{3}','{4}','{5}', '{6}','{7}','{8}','{9}','{10}', '{11}');""".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid, status) # print ("DEBUG: Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() row_counter += 1 else: print ("Error downloading {0}".format(checklist_url)) # Bye print("INFO: {0} rows added to database.".format(str(row_counter))) db.close() web/filldb/check_db.py METASEP import os import sys import pymysql # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes(this_value): return str(this_value).replace("'", "\\'") # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Create connection to MySQL server and number of records print ("Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password) sql_query = "SELECT COUNT(*) FROM {0};".format (mysql_db_table) cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() if len(rows) > 0: row_count = rows[0][0] else: row_count = 0 print ("Table {0} in database {1} contains {2} 
records".format(mysql_db_table, mysql_db_name, str(row_count))) # Bye db.close() scripts/update_excel_xlwings.py METASEP ###################################################################### # # This script reads the checklist items from the latest checklist file # in Github (or from a local file) and populates an Excel spreadsheet # with the contents. # # Last updated: March 2022 # ###################################################################### import json import argparse import sys import os import requests import xlwings as xw # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--checklist-file', dest='checklist_file', action='store', help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github') parser.add_argument('--technology', dest='technology', action='store', help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github') parser.add_argument('--excel-file', dest='excel_file', action='store', help='You need to supply an Excel file where the checklist will be written') parser.add_argument('--app-mode', dest='appmode', action='store_true', default=False, help='Open Excel workbook in App mode, not great for systems without Excel installed (default: False)') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() checklist_file = args.checklist_file excel_file = args.excel_file technology = args.technology # Constants worksheet_checklist_name = 'Checklist' row1 = 10 # First row after which the Excel spreadsheet will be updated col_checklist_name = "A" row_checklist_name = "6" guid_column_index = "L" comment_column_index = "G" sample_cell_index = 'A2' col_area = "A" col_subarea = "B" col_check = "C" col_desc = "D" col_sev = "E" col_status = "F" col_comment = "G" col_link = "H" col_training = "I" col_arg_success = "J" col_arg_failure = "K" col_guid = "L" info_link_text = 'More info' training_link_text = 'Training' worksheet_values_name = 'Values' values_row1 = 2 col_values_severity = "A" col_values_status = "B" col_values_area = "C" col_values_description = "H" # Download checklist if checklist_file: if args.verbose: print("DEBUG: Opening checklist file", checklist_file) # Get JSON try: with open(checklist_file) as f: checklist_data = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", checklist_file, "-", str(e)) sys.exit(1) else: if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" if args.verbose: print("DEBUG: Downloading checklist file from", checklist_url) response = requests.get(checklist_url) # If download was successful if response.status_code == 200: if args.verbose: print ("DEBUG: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_data = json.loads(response.text) except Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Load workbook try: if args.appmode: print("DEBUG: opening Excel workbook in app mode 
'App().books.open'...") app = xw.App() wb = app.books.open(excel_file) else: print("DEBUG: opening Excel workbook with xb.Book function...") wb = xw.Book(excel_file) # This line is occassionally giving the error "(-2147352570, 'Unknown name.', None, None)" if args.verbose: print("DEBUG: workbook", excel_file, "opened successfully") except Exception as e: print("ERROR: Error when opening Excel file", excel_file, "-", str(e)) sys.exit(1) # Get worksheet try: ws = wb.sheets[worksheet_checklist_name] if args.verbose: print("DEBUG: worksheet", worksheet_checklist_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Set checklist name try: ws.range(col_checklist_name + row_checklist_name).value = checklist_data["metadata"]["name"] if args.verbose: print("DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'".format(checklist_data["metadata"]["name"])) except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Get default status from the JSON, default to "Not verified" try: status_list = checklist_data.get("status") default_status = status_list[0].get("name") if args.verbose: print ("DEBUG: default status retrieved from checklist: '{0}'".format(default_status)) except: default_status = "Not verified" if args.verbose: print ("DEBUG: Using default status 'Not verified'") pass # For each checklist item, add a row to spreadsheet row_counter = row1 for item in checklist_data.get("items"): # Read variables from JSON guid = item.get("guid") category = item.get("category") subcategory = item.get("subcategory") text = item.get("text") description = item.get("description") severity = item.get("severity") link = item.get("link") training = item.get("training") status = default_status graph_query_success = item.get("graph_success") graph_query_failure = item.get("graph_failure") # Update Excel ws.range(col_area + str(row_counter)).value = category ws.range(col_subarea + str(row_counter)).value = subcategory ws.range(col_check + str(row_counter)).value = text ws.range(col_desc + str(row_counter)).value = description ws.range(col_sev + str(row_counter)).value = severity ws.range(col_status + str(row_counter)).value = status # ws.range(col_link + str(row_counter)).value = link if link != None: link_elements = link.split('#') link_address = link_elements[0] if len(link_elements) > 1: link_subaddress = link_elements[1] else: link_subaddress = "" ws.api.Hyperlinks.Add (Anchor=ws.range(col_link + str(row_counter)).api, Address=link_address, SubAddress=link_subaddress, ScreenTip="", TextToDisplay=info_link_text) # ws.range(col_training + str(row_counter)).value = training if training != None: training_elements = training.split('#') training_address = training_elements[0] if len(training_elements) > 1: training_subaddress = training_elements[1] else: training_subaddress = "" ws.api.Hyperlinks.Add (Anchor=ws.range(col_training + str(row_counter)).api, Address=training_address, SubAddress=training_subaddress, ScreenTip="", TextToDisplay=training_link_text) # GUID and ARG queries ws.range(col_arg_success + str(row_counter)).value = graph_query_success ws.range(col_arg_failure + str(row_counter)).value = graph_query_failure ws.range(col_guid + str(row_counter)).value = guid # Next row row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - row1), "checks addedd to Excel spreadsheet") # Get worksheet 
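# The 'Values' worksheet selected below holds the lookup lists that are written further
# down in this script: the checklist categories, the statuses (with their descriptions)
# and the severities.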
try: wsv = wb.sheets[worksheet_values_name] if args.verbose: print("DEBUG: worksheet", worksheet_values_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_values_name, "-", str(e)) sys.exit(1) # Update categories row_counter = values_row1 for item in checklist_data.get("categories"): area = item.get("name") wsv.range(col_values_area + str(row_counter)).value = area row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "categories addedd to Excel spreadsheet") # Update status row_counter = values_row1 for item in checklist_data.get("status"): status = item.get("name") description = item.get("description") wsv.range(col_values_status + str(row_counter)).value = status wsv.range(col_values_description + str(row_counter)).value = description row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "statuses addedd to Excel spreadsheet") # Update severities row_counter = values_row1 for item in checklist_data.get("severities"): severity = item.get("name") wsv.range(col_values_severity + str(row_counter)).value = severity row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "severities addedd to Excel spreadsheet") # Close book if args.verbose: print("DEBUG: saving workbook", excel_file) try: wb.save() if args.appmode: app.quit() # If we were in app mode, close Excel except Exception as e: print("ERROR: Error when saving Excel file", excel_file, "-", str(e)) sys.exit(1) scripts/update_excel_openpyxl.py METASEP ###################################################################### # # This script reads the checklist items from the latest checklist file # in Github (or from a local file) and populates an Excel spreadsheet # with the contents. # # Last updated: March 2022 # ###################################################################### import json import argparse import sys import os import requests import glob from openpyxl import load_workbook from openpyxl.worksheet.datavalidation import DataValidation # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--checklist-file', dest='checklist_file', action='store', help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github') parser.add_argument('--only-english', dest='only_english', action='store_true', default=False, help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)') parser.add_argument('--find-all', dest='find_all', action='store_true', default=False, help='if checklist files are specified, find all the languages for the given checklists (default: False)') parser.add_argument('--technology', dest='technology', action='store', help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github') parser.add_argument('--excel-file', dest='excel_file', action='store', help='You need to supply an Excel file where the checklist will be written') parser.add_argument('--output-excel-file', dest='output_excel_file', action='store', help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place') parser.add_argument('--output-path', dest='output_path', action='store', help='If using --output-name-is-input-name, folder where to store the results') parser.add_argument('--output-name-is-input-name', dest='output_name_is_input_name', action='store_true', default=False, help='Save the output in a file with the same filename as the JSON input, but with xlsx extension') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() checklist_file = args.checklist_file excel_file = args.excel_file technology = args.technology # Constants worksheet_checklist_name = 'Checklist' row1 = 8 # First row after which the Excel spreadsheet will be updated col_checklist_name = "A" row_checklist_name = "4" guid_column_index = "L" comment_column_index = "G" sample_cell_index = 'A4' col_area = "A" col_subarea = "B" col_check = "C" col_desc = "D" col_sev = "E" col_status = "F" col_comment = "G" col_link = "H" col_training = "I" col_arg_success = "J" col_arg_failure = "K" col_guid = "L" info_link_text = 'More info' training_link_text = 'Training' worksheet_values_name = 'Values' values_row1 = 2 col_values_severity = "A" col_values_status = "B" col_values_area = "C" col_values_description = "H" # Main function def update_excel_file(input_excel_file, output_excel_file, checklist_data): # Load workbook try: wb = load_workbook(filename = input_excel_file) if args.verbose: print("DEBUG: workbook", input_excel_file, "opened successfully") except Exception as e: print("ERROR: Error when opening Excel file", input_excel_file, "-", str(e)) sys.exit(1) # Get worksheet try: ws = wb[worksheet_checklist_name] if args.verbose: print("DEBUG: worksheet", worksheet_checklist_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Set checklist name try: ws[col_checklist_name + row_checklist_name] = checklist_data["metadata"]["name"] if args.verbose: print("DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'".format(checklist_data["metadata"]["name"])) except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Get default status from the JSON, default to "Not verified" try: status_list = checklist_data.get("status") default_status = status_list[0].get("name") if args.verbose: print ("DEBUG: 
default status retrieved from checklist: '{0}'".format(default_status)) except: default_status = "Not verified" if args.verbose: print ("DEBUG: Using default status 'Not verified'") pass # For each checklist item, add a row to spreadsheet row_counter = row1 for item in checklist_data.get("items"): # Read variables from JSON guid = item.get("guid") category = item.get("category") subcategory = item.get("subcategory") text = item.get("text") description = item.get("description") severity = item.get("severity") link = item.get("link") training = item.get("training") status = default_status graph_query_success = item.get("graph_success") graph_query_failure = item.get("graph_failure") # Update Excel ws[col_area + str(row_counter)].value = category ws[col_subarea + str(row_counter)].value = subcategory ws[col_check + str(row_counter)].value = text ws[col_desc + str(row_counter)].value = description ws[col_sev + str(row_counter)].value = severity ws[col_status + str(row_counter)].value = status ws[col_link + str(row_counter)].value = link # if link != None: # link_elements = link.split('#') # link_address = link_elements[0] # if len(link_elements) > 1: # link_subaddress = link_elements[1] # else: # link_subaddress = "" # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip="", TextToDisplay=info_link_text) ws[col_training + str(row_counter)].value = training # if training != None: # training_elements = training.split('#') # training_address = training_elements[0] # if len(training_elements) > 1: # training_subaddress = training_elements[1] # else: # training_subaddress = "" # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip="", TextToDisplay=training_link_text) # GUID and ARG queries ws[col_arg_success + str(row_counter)].value = graph_query_success ws[col_arg_failure + str(row_counter)].value = graph_query_failure ws[col_guid + str(row_counter)].value = guid # Next row row_counter += 1 # Display summary if args.verbose: number_of_checks = row_counter - row1 print("DEBUG:", str(number_of_checks), "checks addedd to Excel spreadsheet") # Get worksheet try: wsv = wb[worksheet_values_name] if args.verbose: print("DEBUG: worksheet", worksheet_values_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_values_name, "-", str(e)) sys.exit(1) # Update categories row_counter = values_row1 for item in checklist_data.get("categories"): area = item.get("name") wsv[col_values_area + str(row_counter)].value = area row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "categories addedd to Excel spreadsheet") # Update status row_counter = values_row1 for item in checklist_data.get("status"): status = item.get("name") description = item.get("description") wsv[col_values_status + str(row_counter)].value = status wsv[col_values_description + str(row_counter)].value = description row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "statuses addedd to Excel spreadsheet") # Update severities row_counter = values_row1 for item in checklist_data.get("severities"): severity = item.get("name") wsv[col_values_severity + str(row_counter)].value = severity row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "severities addedd to Excel spreadsheet") # Data validation 
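    # The data validation added below restricts the Status column over the rows of checks
    # just written (row1 to row1 + number_of_checks) to the status values stored in the
    # 'Values' sheet (cells B2:B6), so a reviewer picks a status from a drop-down rather
    # than typing free text.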
# dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True) dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True) rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks) if args.verbose: print("DEBUG: adding data validation to range", rangevar) dv.add(rangevar) ws.add_data_validation(dv) # Close book if args.verbose: print("DEBUG: saving workbook", output_excel_file) try: wb.save(output_excel_file) except Exception as e: print("ERROR: Error when saving Excel file to", output_excel_file, "-", str(e)) sys.exit(1) ######## # Main # ######## # Download checklist if checklist_file: checklist_file_list = checklist_file.split(" ") # If --only-english parameter was supplied, take only the English version and remove duplicates if args.only_english: checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list] checklist_file_list = list(set(checklist_file_list)) if args.verbose: print("DEBUG: new checklist file list:", str(checklist_file_list)) # If --find-all paramater was supplied, find all the languages for the checklist if args.find_all: new_file_list = [] for checklist_file in checklist_file_list: filedir = os.path.dirname(checklist_file) filebase = os.path.basename(checklist_file) filebase_noext = filebase[:-8] # Remove '.en.json' file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json')) for checklist_match in file_match_list: # new_file_list.append(os.path.join(filedir, checklist_match)) new_file_list.append(checklist_match) checklist_file_list = list(set(new_file_list)) if args.verbose: print("DEBUG: new checklist file list:", str(checklist_file_list)) # Go over the list for checklist_file in checklist_file_list: if args.verbose: print("DEBUG: Opening checklist file", checklist_file) # Get JSON try: with open(checklist_file) as f: checklist_data = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", checklist_file, "-", str(e)) sys.exit(0) # Set input and output files input_excel_file = excel_file if args.output_excel_file: output_excel_file = args.output_excel_file elif args.output_name_is_input_name: if args.output_path: # Get filename without path and extension output_excel_file = os.path.splitext(os.path.basename(checklist_file))[0] + '.xlsx' output_excel_file = os.path.join(args.output_path, output_excel_file) else: # Just change the extension output_excel_file = os.path.splitext(checklist_file)[0] + '.xlsx' # Update spreadsheet update_excel_file(input_excel_file, output_excel_file, checklist_data) else: if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" if args.verbose: print("DEBUG: Downloading checklist file from", checklist_url) response = requests.get(checklist_url) # If download was successful if response.status_code == 200: if args.verbose: print ("DEBUG: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_data = json.loads(response.text) except Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Upload spreadsheet if args.output_excel_file: output_excel_file = args.output_excel_file else: output_excel_file = excel_file update_excel_file(excel_file, output_excel_file, checklist_data) scripts/translate.py METASEP 
import requests import os import argparse import sys import json import uuid # Variables translate_keys = ('description', 'name', 'category', 'subcategory', 'text', 'severity') translate_languages = ['es', 'ja', 'pt', 'ko'] # Get environment variables translator_endpoint = os.environ["AZURE_TRANSLATOR_ENDPOINT"] translator_region = os.environ["AZURE_TRANSLATOR_REGION"] translator_key = os.environ["AZURE_TRANSLATOR_SUBSCRIPTION_KEY"] translator_url = translator_endpoint + 'translate' # Get input arguments parser = argparse.ArgumentParser(description='Translate a JSON file') parser.add_argument('--input-file-name', dest='file_name_in', action='store', help='you need to supply file name where your JSON to be translated is located') parser.add_argument('--output-file-name', dest='file_name_out', action='store', help='you need to supply file name where the translated JSON will be saved') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() # Check we have all information if translator_endpoint and translator_region and translator_key: if args.verbose: print('DEBUG: environment variables retrieved successfully: {0}, {1}, {2}'.format(translator_endpoint, translator_region, translator_key)) else: print('ERROR: couldnt retrieve environment variables for translation') sys.exit(1) # Get JSON try: with open(args.file_name_in) as f: checklist = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", args.file_name_in, "-", str(e)) sys.exit(1) # Function to translate a single line of text to a single language def translate_text(text_to_translate, languages): if args.verbose: print('DEBUG: translating text "{0}" on {1}...'.format(text_to_translate, translator_url)) # If a single languages specified, convert to array if not type(languages) == list: languages = [languages] # Azure Translator parameters translator_params = { 'api-version': '3.0', 'from': 'en', 'to': languages } translator_headers = { 'Ocp-Apim-Subscription-Key': translator_key, 'Ocp-Apim-Subscription-Region': translator_region, 'Content-type': 'application/json', 'Accept': 'application/json', 'X-ClientTraceId': str(uuid.uuid4()) } translator_body = [{ 'text': text_to_translate }] if args.verbose: print ("DEBUG: sending body", str(translator_body)) print ("DEBUG: sending HTTP headers", str(translator_headers)) print ("DEBUG: sending parameters", str(translator_params)) try: request = requests.post(translator_url, params=translator_params, headers=translator_headers, json=translator_body) response = request.json() if args.verbose: print("DEBUG: translator response:") print(json.dumps(response, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))) return str(response[0]['translations'][0]['text']) except Exception as e: print("ERROR: Error in translation:", str(e)) # Go over all keys and translate them if required def translate_object(checklist_object, language): translated_object = checklist_object.copy() for (k, v) in translated_object.items(): if isinstance(v, list): translated_items = [] for list_item in v: translated_items.append(translate_object(list_item, language)) translated_object[k] = translated_items else: if k in translate_keys: # print("Found key", k, "and scalar value", v) translated_object[k] = translate_text(v, language) return translated_object ################ # Main # ################ if args.verbose: print("DEBUG: Starting translations for languages", str(translate_languages)) 
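# Example invocation, as a rough sketch: the endpoint, region and file path below are
# illustrative placeholders; the flags and environment variable names are the ones defined
# at the top of this script.
#
#   export AZURE_TRANSLATOR_ENDPOINT="https://api.cognitive.microsofttranslator.com/"
#   export AZURE_TRANSLATOR_REGION="westeurope"
#   export AZURE_TRANSLATOR_SUBSCRIPTION_KEY="<your-key>"
#   python3 ./scripts/translate.py --input-file-name ./checklists/lz_checklist.en.json --verbose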
for using_language in translate_languages:
    print("INFO: Starting translation to", using_language)
    translated_checklist = translate_object(checklist, using_language)
    # If no output file was specified, use the input file name and insert the language code before the .json extension
    if not args.file_name_out:
        file_name_in_base = os.path.basename(args.file_name_in)
        file_name_in_dir = os.path.dirname(args.file_name_in)
        file_name_in_noext = file_name_in_base.split('.')[0]
        file_name_out = file_name_in_noext + '.' + using_language + '.json'
        file_name_out = os.path.join(file_name_in_dir, file_name_out)
    else:
        file_name_out = args.file_name_out
    print("INFO: saving output file to", file_name_out)
    translated_checklist_string = json.dumps(translated_checklist, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))
    with open(file_name_out, 'w', encoding='utf-8') as f:
        f.write(translated_checklist_string)
    # print(json.dumps(translated_checklist, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': ')))

scripts/sort_checklist.py METASEP

#################################################################################
#
# This script sorts a specific checklist and saves it.
#
# Last updated: January 2023
#
#################################################################################

import json
import argparse
import sys
import requests

# Get input arguments
parser = argparse.ArgumentParser(description='Sort the items of a checklist JSON file by category and subcategory')
parser.add_argument('--input-file', dest='input_file', action='store',
                    help='You need to supply the name of the JSON file with the checklist to be sorted')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of a new JSON file that will be used to save the output. Otherwise the sorted checklist will overwrite the input file')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False,
                    help='do not save anything, only output to console (default: False)')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()

if not args.input_file:
    print("ERROR: no input file specified, not doing anything")
    sys.exit(1)

# Load the checklist
try:
    with open(args.input_file) as f:
        checklist = json.load(f)
except Exception as e:
    print("ERROR: Error when processing JSON file, nothing changed", args.input_file, "-", str(e))
    sys.exit(1)

# Sort the items per category and subcategory
items = checklist['items']
items = sorted(items, key=lambda k: (k['category'], k['subcategory']))
checklist['items'] = items

# If dry-run, show on screen
if args.dry_run:
    print(json.dumps(checklist, indent=4))

# Saving output file if specified in the argument
if not args.dry_run:
    if args.output_file:
        output_file = args.output_file
    else:
        output_file = args.input_file
    if args.verbose:
        print("DEBUG: saving output file to", output_file)
    checklist_string = json.dumps(checklist, indent=4)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(checklist_string)

scripts/compile_checklist.py METASEP

#################################################################################
#
# This script attempts to build a unified checklist out of all the different checklists
# stored in this repo, and optionally filter it per design area.
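#
# Example invocation, as a rough sketch: the category value and output path are hypothetical
# placeholders; the flags are the ones defined by this script's argument parser.
#   python3 ./scripts/compile_checklist.py --category network --print-categories \
#       --output-file ./checklists/combined_checklist.en.json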
# # Last updated: June 2022 # ################################################################################# import json import argparse import sys import requests # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--output-file', dest='output_file', action='store', help='You can optionally supply the name of the JSON file that will be created. Otherwise no output will be generated') parser.add_argument('--category', dest='category_filter', action='store', help='You can optionally provide a category name as a filter') parser.add_argument('--checklist-name', dest='new_checklist_name', action='store', default='Combined checklist', help='You can optionally provide a category name as a filter') parser.add_argument('--print-categories', dest='print_categories', action='store_true', default=False, help='print the categories of the combined checklist (default: False)') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() if args.category_filter: category_filter = args.category_filter.lower() # Variables repo_contents_url = 'https://api.github.com/repos/azure/review-checklists/contents/checklists' # Get existing checklists in the repo response = requests.get(repo_contents_url) # If download was successful if response.status_code == 200: if args.verbose: print ("DEBUG: Github contents downloaded successfully from {0}".format(repo_contents_url)) try: content_data = json.loads(response.text) except Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Get the list of checklist files checklist_urls = [] if content_data: for github_object in content_data: if github_object['name'][-7:] == 'en.json': checklist_urls.append(github_object['download_url']) else: print("Error deserializing JSON content from GitHub repository contents: {0}".format(str(e))) sys.exit(1) if args.verbose: print("DEBUG: {0} checklists found".format(str(len(checklist_urls)))) # Load all of the items in memory new_checklist = { 'items': [], 'status': [ {'name': 'Not verified', 'description': 'This check has not been looked at yet'}, {'name': 'Open', 'description': 'There is an action item associated to this check'}, {'name': 'Fulfilled', 'description': 'This check has been verified, and there are no further action items associated to it'}, {'name': 'Not required', 'description': 'Recommendation understood, but not needed by current requirements'}, {'name': 'N/A', 'description': 'Not applicable for current design'} ], 'severities': [ {'name': 'High'}, {'name': 'Medium'}, {'name': 'Low'} ], 'categories': [], 'metadata': { 'name': args.new_checklist_name } } for checklist_url in checklist_urls: if args.verbose: print("DEBUG: Downloading checklist file from", checklist_url) response = requests.get(checklist_url) if response.status_code == 200: if args.verbose: print ("DEBUG: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_data = json.loads(response.text) checklist_name = checklist_data['metadata']['name'] for item in checklist_data['items']: if checklist_name: item['checklist'] = checklist_name item_category = str(item['category']).lower() if not args.category_filter or item_category.__contains__(category_filter): new_checklist['items'].append(item) except Exception as e: print("Error deserializing JSON content: 
{0}".format(str(e))) sys.exit(1) if args.verbose: print("DEBUG: Resulting combined checklist has {0} items".format(str(len(new_checklist['items'])))) # Add the categories to the new checklist categories = [] for item in new_checklist['items']: category_name=item['checklist'] + '/' + item['category'] if not category_name in categories: categories.append(category_name) if args.verbose: print("DEBUG: {0} categories found".format(str(len(categories)))) for category in categories: new_checklist['categories'].append({'name': category}) if args.print_categories: print(category) # Saving output file if specified in the argument if args.output_file: if args.verbose: print("DEBUG: saving output file to", args.output_file) new_checklist_string = json.dumps(new_checklist) with open(args.output_file, 'w', encoding='utf-8') as f: f.write(new_checklist_string) f.close() scripts/checklist_graph_update.py METASEP ################################################################################# # # This is a study on two libraries to update Excel files: openpyxl and xlwings # This exercise has shown that openpyxl breaks the xlsx files in this repo (maybe # because of the macros, or the formulae), while xlwings works fine. # # This script reads a previously generated JSON file with the results of Azure # Resource Graph queries, and stores them in the 'Comments' column of a # spreadsheet. Both the JSON file and the spreadsheet file are supplied as # parameters. # # Last updated: March 2022 # ################################################################################# import json import argparse import sys from pandas import DataFrame from openpyxl import load_workbook import xlwings as xw # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--graph-file', dest='graph_file', action='store', help='You need to supply a JSON file containing the results of Azure Resource Graph Queries') parser.add_argument('--excel-file', dest='excel_file', action='store', help='You need to supply an Excel file where the query results will be stored') parser.add_argument('--mode', dest='mode', action='store', default="openpyxl", help='It can be either xlwings or openpyxl (default is openpyxl)') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() graph_file = args.graph_file excel_file = args.excel_file mode = args.mode # Constants guid_column_index = "K" comment_column_index = "G" sample_cell_index = 'A4' # Get JSON try: with open(graph_file) as f: graph_data = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", graph_file, "-", str(e)) sys.exit(1) # Load workbook try: if mode == 'openpyxl': if args.verbose: print("DEBUG: working with openpyxl library") wb = load_workbook(filename = excel_file) ws = wb['Checklist'] elif mode == 'xlwings': if args.verbose: print("DEBUG: working with xlwings library") wb = xw.Book(excel_file) ws = wb.sheets['Checklist'] else: print("ERROR: mode {0} not recognized".format(mode)) except Exception as e: print("ERROR: Error when opening Excel file", excel_file, "-", str(e)) sys.exit(1) # Print specific cell if args.verbose: print("DEBUG: looking at spreadsheet for", ws[sample_cell_index].value) # Get GUID column into a list if mode == 'openpyxl': guid_col = ws[guid_column_index] guid_col_values = [x.value for x in guid_col] if args.verbose: 
print("DEBUG: GUID column retrieved with", str(len(guid_col_values)), "values") elif mode == 'xlwings': guid_col_values = ws.range(guid_column_index + ":" + guid_column_index).value if args.verbose: print("DEBUG: GUID column retrieved with", str(len(guid_col_values)), "values") else: print("ERROR: mode {0} not recognized".format(mode)) sys.exit(1) # Go over all checks in the JSON file for check in graph_data['checks']: guid = check['guid'] arm_id = check['id'] compliant = check['compliant'] if (compliant == "false"): comment = "Non-compliant: {0}\n".format(arm_id) elif (compliant == "true"): comment = "Compliant: {0}\n".format(arm_id) else: print("ERROR: compliant status {0} not recognized".format(compliant)) # Find the guid in the list if guid in guid_col_values: row = guid_col_values.index(guid) cell_index = comment_column_index + str(row) print("DEBUG: updating cell", cell_index) if mode == 'openpyxl': ws[cell_index] = comment elif mode == 'xlwings': ws.range(cell_index).value = comment else: print("ERROR: could not find GUID {0} in the Excel list".format(guid)) # Saving file if mode == 'openpyxl': print("DEBUG: saving workbook", excel_file) try: wb.save(excel_file) except Exception as e: print("ERROR: Error when saving Excel file", excel_file, "-", str(e)) sys.exit(1) elif mode == 'xlwings': print("DEBUG: saving workbook", excel_file) try: wb.save() except Exception as e: print("ERROR: Error when saving Excel file", excel_file, "-", str(e)) sys.exit(1) else: print("ERROR: mode {0} not recognized".format(mode)) scripts/workbook_create.py METASEP
######################################################################
#
# This script reads the checklist items from the latest checklist file
# in Github (or from a local file) and generates an Azure Monitor
# workbook in JSON format.
#
# Last updated: February 2023
#
######################################################################

import json
import argparse
import sys
import os
import requests
import glob
import uuid

# Get input arguments
parser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')
parser.add_argument('--checklist-file', dest='checklist_file', action='store',
                    help='You can optionally supply a JSON file containing the checklist you want to turn into a workbook. Otherwise it will take the latest file from Github')
parser.add_argument('--only-english', dest='only_english', action='store_true', default=False,
                    help='if checklist files are specified, ignore the non-English ones and only generate a workbook for the English version (default: False)')
parser.add_argument('--find-all', dest='find_all', action='store_true', default=False,
                    help='if checklist files are specified, find all the languages for the given checklists (default: False)')
parser.add_argument('--technology', dest='technology', action='store',
                    help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of the file where the generated workbook JSON will be saved, otherwise it will be printed to the console')
parser.add_argument('--output-path', dest='output_path', action='store',
                    help='Folder where to store the results (using the same name as the input_file)')
parser.add_argument('--blocks-path', dest='blocks_path', action='store',
                    help='Folder where the building blocks to build the workbook are stored')
parser.add_argument('--verbose', dest='verbose', action='store_true',
                    default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()
checklist_file = args.checklist_file
technology = args.technology

block_workbook = None
block_link = None
block_section = None
block_query = None
block_text = None

query_size = 4  # 0: medium, 1: small, 4: tiny

# Workbook building blocks
def load_building_blocks():

    # Define the blocks as global variables
    global block_workbook
    global block_link
    global block_section
    global block_query
    global block_text

    # Set folder where to load from
    if args.blocks_path:
        blocks_path = args.blocks_path
        if args.verbose:
            print("DEBUG: Setting building block folder to {0}".format(blocks_path))
    else:
        print("ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.")
        sys.exit(1)

    # Load initial workbook building block
    block_file = os.path.join(blocks_path, 'block_workbook.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_workbook = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load link building block
    block_file = os.path.join(blocks_path, 'block_link.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_link = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load itemgroup (aka section) building block
    block_file = os.path.join(blocks_path, 'block_itemgroup.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_section = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load query building block
    block_file = os.path.join(blocks_path, 'block_query.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_query = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load text building block
    block_file = os.path.join(blocks_path, 'block_text.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_text = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)

# Main function to generate the workbook JSON
def generate_workbook(output_file, checklist_data):

    # Initialize an empty workbook
    workbook = block_workbook

    # Generate one tab in the workbook for each category
    category_id = 0
    query_id = 0
    category_dict = {}
    for item in checklist_data.get("categories"):
        category_title = item.get("name")
        category_id += 1
        category_dict[category_title] = category_id + 1  # We will use this dict later to know where to put each query
        # Create new link
        new_link = block_link.copy()
        new_link['id'] = str(uuid.uuid4())  # RANDOM GUID
        new_link['linkLabel'] = category_title
        new_link['subTarget'] = 'category' + str(category_id)
        new_link['preText'] = category_title
        # Create new section
        new_section = block_section.copy()
        new_section['name'] = 'category' + str(category_id)
        new_section['conditionalVisibility']['value'] = 'category' + str(category_id)
        new_section['content']['items'][0]['content']['json'] = "## " + category_title
        new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'
        # Add link and query to workbook
        # if args.verbose:
        #     print()
        #     print ("DEBUG: Adding link: {0}".format(json.dumps(new_link)))
        #     print ("DEBUG: Adding section: {0}".format(json.dumps(new_section)))
        #     print("DEBUG: Workbook so far: {0}".format(json.dumps(workbook)))
        workbook['items'][1]['content']['links'].append(new_link.copy())  # I am getting crazy with Python variable references :(
        # Add section to workbook
        new_new_section = json.loads(json.dumps(new_section.copy()))
        workbook['items'].append(new_new_section)

    # For each checklist item with a graph query, add a text and a query element to its category tab
    for item in checklist_data.get("items"):
        # Read variables from JSON
        guid = item.get("guid")
        category = item.get("category")
        subcategory = item.get("subcategory")
        text = item.get("text")
        description = item.get("description")
        severity = item.get("severity")
        link = item.get("link")
        training = item.get("training")
        graph_query = item.get("graph")
        if graph_query:
            query_id += 1
            # Create new text
            new_text = block_text.copy()
            new_text['name'] = 'querytext' + str(query_id)
            new_text['content']['json'] = text
            if link:
                new_text['content']['json'] += ". Check [this link](" + link + ") for further information."
            if training:
                new_text['content']['json'] += ". [This training](" + training + ") can help to educate yourself on this."
            # Create new query
            new_query = block_query.copy()
            new_query['name'] = 'query' + str(query_id)
            new_query['content']['query'] = graph_query
            new_query['content']['size'] = query_size
            # Add text and query to the workbook
            category_id = category_dict[category]
            if args.verbose:
                print("DEBUG: Adding text and query to category ID {0}, workbook object name is {1}".format(str(category_id), workbook['items'][category_id]['name']))
            new_new_text = json.loads(json.dumps(new_text.copy()))
            new_new_query = json.loads(json.dumps(new_query.copy()))
            workbook['items'][category_id]['content']['items'].append(new_new_text)
            workbook['items'][category_id]['content']['items'].append(new_new_query)

    # Dump the workbook to the output file or to console, if there was any query in the original checklist
    if query_id > 0:
        workbook_string = json.dumps(workbook, indent=4)
        if output_file:
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(workbook_string)
        else:
            print(workbook_string)
    else:
        print("INFO: sorry, the analyzed checklist did not contain any graph query")

def get_output_file(checklist_file_or_url, is_file=True):
    if is_file:
        output_file = os.path.basename(checklist_file_or_url)
    else:
        output_file = checklist_file_or_url.split('/')[-1]
    if args.output_file:
        return args.output_file
    elif args.output_path:
        # Get filename without path and extension
        output_file = os.path.join(args.output_path, output_file)
        return os.path.splitext(output_file)[0] + '_workbook.json'
    else:
        return None


########
# Main #
########

# First thing of all, load the building blocks
load_building_blocks()
if args.verbose:
    print("DEBUG: building blocks variables initialized:")
    print("DEBUG: - Workbook: {0}".format(str(block_workbook)))
    print("DEBUG: - Link: {0}".format(str(block_link)))
    print("DEBUG: - Query: {0}".format(str(block_query)))

# Download checklist or process from local file
if checklist_file:
    checklist_file_list = checklist_file.split(" ")
    # If --only-english parameter was supplied, take only the English version and remove duplicates
    if args.only_english:
        checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list]
        checklist_file_list = list(set(checklist_file_list))
        if args.verbose:
            print("DEBUG: new checklist file list:", str(checklist_file_list))
    # If --find-all parameter was supplied, find all the languages for the checklist
    if args.find_all:
        new_file_list = []
        for checklist_file in checklist_file_list:
            filedir = os.path.dirname(checklist_file)
            filebase = os.path.basename(checklist_file)
            filebase_noext = filebase[:-8]  # Remove '.en.json'
            file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json'))
            for checklist_match in file_match_list:
                # new_file_list.append(os.path.join(filedir, checklist_match))
                new_file_list.append(checklist_match)
            checklist_file_list = list(set(new_file_list))
        if args.verbose:
            print("DEBUG: new checklist file list:", str(checklist_file_list))
    # Go over the list(s)
    for checklist_file in checklist_file_list:
        if args.verbose:
            print("DEBUG: Opening checklist file", checklist_file)
        # Get JSON
        try:
            with open(checklist_file) as f:
                checklist_data = json.load(f)
        except Exception as e:
            print("ERROR: Error when processing JSON file", checklist_file, "-", str(e))
            sys.exit(0)
        # Set output files
        output_file = get_output_file(checklist_file, is_file=True)
        # Generate workbook
        generate_workbook(output_file, checklist_data)
else:
    # If no input files specified, fetch the latest from Github...
    if technology:
        checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json"
    else:
        checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json"
    if args.verbose:
        print("DEBUG: Downloading checklist file from", checklist_url)
    response = requests.get(checklist_url)
    # If download was successful
    if response.status_code == 200:
        if args.verbose:
            print("DEBUG: File {0} downloaded successfully".format(checklist_url))
        try:
            # Deserialize JSON to object variable
            checklist_data = json.loads(response.text)
        except Exception as e:
            print("Error deserializing JSON content: {0}".format(str(e)))
            sys.exit(1)
        # Set output files
        output_file = get_output_file(checklist_url, is_file=False)
        # Generate workbook
        generate_workbook(output_file, checklist_data)
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
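As a side note on the --only-english behaviour described in the help text above: the script rewrites each supplied path so it points at the English variant and then de-duplicates the list. A small standalone sketch of that normalization, assuming the '<name>.<lang>.json' naming convention the checklists use; the sample file names are made up for illustration.

# Sketch of the --only-english normalization: replace the trailing
# '.<lang>.json' suffix (8 characters, e.g. '.es.json') with '.en.json'
# and remove duplicates. Standalone illustration, not the script itself.
files = ["aks_checklist.es.json", "aks_checklist.en.json", "lz_checklist.ja.json"]
english_only = list({f[:-8] + ".en.json" for f in files})
print(english_only)  # e.g. ['aks_checklist.en.json', 'lz_checklist.en.json']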
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
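For --find-all, the script later strips the '.en.json' suffix and globs for every sibling '<base>.*.json' file. A minimal sketch of that lookup; the helper name find_language_variants is an assumption used only for illustration.

# Sketch of the --find-all expansion: strip '.en.json' and glob for
# '<base>.*.json' in the same folder. Illustration only.
import glob
import os

def find_language_variants(checklist_file):
    filedir = os.path.dirname(checklist_file)
    filebase_noext = os.path.basename(checklist_file)[:-8]  # drop '.en.json'
    return sorted(set(glob.glob(os.path.join(filedir, filebase_noext + ".*.json"))))

# find_language_variants("checklists/aks_checklist.en.json")
# -> ['checklists/aks_checklist.en.json', 'checklists/aks_checklist.es.json', ...]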
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
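Regarding --output-file and --output-path: the output name is derived from the input checklist, keeping its base name and adding a '_workbook.json' suffix under --output-path, while an explicit --output-file wins outright and no option at all means the workbook is printed to the console. A hedged sketch of that derivation; derive_output_file and its keyword arguments are illustrative, not the script's get_output_file signature.

# Sketch of the output-file naming used by get_output_file(). Illustration only.
import os

def derive_output_file(checklist_path, output_file=None, output_path=None):
    if output_file:
        return output_file                     # explicit file name wins
    if output_path:
        base = os.path.basename(checklist_path)
        stem = os.path.splitext(os.path.join(output_path, base))[0]
        return stem + "_workbook.json"
    return None                                # None -> dump to console

# derive_output_file("checklists/aks_checklist.en.json", output_path="out")
# -> 'out/aks_checklist.en_workbook.json'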
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n graph_query = item.get(\"graph\")\r\n if graph_query:\r\n query_id += 1\r\n # Create new text\r\n new_text = block_text.copy()\r\n new_text['name'] = 'querytext' + str(query_id)\r\n new_text['content']['json'] = text\r\n if link:\r\n new_text['content']['json'] += \". Check [this link](\" + link + \") for further information.\"\r\n if training:\r\n new_text['content']['json'] += \". 
[This training](\" + training + \") can help to educate yourself on this.\"\r\n # Create new query\r\n new_query = block_query.copy()\r\n new_query['name'] = 'query' + str(query_id)\r\n new_query['content']['query'] = graph_query\r\n new_query['content']['size'] = query_size\r\n # Add text and query to the workbook\r\n category_id = category_dict[category]\r\n if args.verbose:\r\n print (\"DEBUG: Adding text and query to category ID {0}, workbook object name is {1}\".format(str(category_id), workbook['items'][category_id]['name']))\r\n new_new_text=json.loads(json.dumps(new_text.copy()))\r\n new_new_query=json.loads(json.dumps(new_query.copy()))\r\n workbook['items'][category_id]['content']['items'].append(new_new_text)\r\n workbook['items'][category_id]['content']['items'].append(new_new_query)\r\n\r\n # Dump the workbook to the output file or to console, if there was any query in the original checklist\r\n if query_id > 0:\r\n workbook_string = json.dumps(workbook, indent=4)\r\n if output_file:\r\n with open(output_file, 'w', encoding='utf-8') as f:\r\n f.write(workbook_string)\r\n f.close()\r\n else:\r\n print(workbook_string)\r\n else:\r\n print(\"INFO: sorry, the analyzed checklist did not contain any graph query\")\r\n\r\ndef get_output_file(checklist_file_or_url, is_file=True):\r\n if is_file:\r\n output_file = os.path.basename(checklist_file_or_url)\r\n else:\r\n output_file = checklist_file_or_url.split('/')[-1]\r\n if args.output_file:\r\n return args.output_file\r\n elif args.output_path:\r\n # Get filename without path and extension\r\n output_file = os.path.join(args.output_path, output_file)\r\n return os.path.splitext(output_file)[0] + '_workbook.json'\r\n else:\r\n output_file = None\r\n\r\n\r\n########\r\n# Main #\r\n########\r\n\r\n# First thing of all, load the building blocks\r\nload_building_blocks()\r\nif args.verbose:\r\n print (\"DEBUG: building blocks variables intialized:\")\r\n print (\"DEBUG: - Workbook: {0}\".format(str(block_workbook)))\r\n print (\"DEBUG: - Link: {0}\".format(str(block_link)))\r\n print (\"DEBUG: - Query: {0}\".format(str(block_query)))\r\n\r\n# Download checklist or process from local file\r\nif checklist_file:\r\n checklist_file_list = checklist_file.split(\" \")\r\n # If --only-english parameter was supplied, take only the English version and remove duplicates\r\n if args.only_english:\r\n checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list]\r\n checklist_file_list = list(set(checklist_file_list))\r\n if args.verbose:\r\n print(\"DEBUG: new checklist file list:\", str(checklist_file_list))\r\n # If --find-all paramater was supplied, find all the languages for the checklist\r\n if args.find_all:\r\n new_file_list = []\r\n for checklist_file in checklist_file_list:\r\n filedir = os.path.dirname(checklist_file)\r\n filebase = os.path.basename(checklist_file)\r\n filebase_noext = filebase[:-8] # Remove '.en.json'\r\n file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json'))\r\n for checklist_match in file_match_list:\r\n # new_file_list.append(os.path.join(filedir, checklist_match))\r\n new_file_list.append(checklist_match)\r\n checklist_file_list = list(set(new_file_list))\r\n if args.verbose:\r\n print(\"DEBUG: new checklist file list:\", str(checklist_file_list))\r\n # Go over the list(s)\r\n for checklist_file in checklist_file_list:\r\n if args.verbose:\r\n print(\"DEBUG: Opening checklist file\", checklist_file)\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r", "type": "common" 
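generate_workbook() reuses the loaded building blocks as templates, and because dict.copy() is shallow it round-trips each customized section and query through json.dumps()/json.loads() before appending it (hence the "getting crazy with Python variable references" remark). copy.deepcopy() is the usual alternative; the sketch below only illustrates the difference and is not a change to the script.

# Shallow vs. deep copies of a nested workbook template (illustration only).
import copy
import json

template = {"name": "query", "content": {"query": "", "size": 4}}

shallow = template.copy()
shallow["content"]["query"] = "Resources | count"   # also mutates template (shared nested dict)

deep1 = copy.deepcopy(template)                      # independent copy
deep2 = json.loads(json.dumps(template))             # the round-trip the script uses
deep1["content"]["query"] = "Resources | project name"
assert template["content"]["query"] == "Resources | count"  # unchanged by deep1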
}, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = 
os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = 
item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n graph_query = item.get(\"graph\")\r\n if graph_query:\r\n query_id += 1\r\n # Create new text\r\n new_text = block_text.copy()\r\n new_text['name'] = 'querytext' + str(query_id)\r\n new_text['content']['json'] = text\r\n if link:\r\n new_text['content']['json'] += \". Check [this link](\" + link + \") for further information.\"\r\n if training:\r\n new_text['content']['json'] += \". [This training](\" + training + \") can help to educate yourself on this.\"\r\n # Create new query\r\n new_query = block_query.copy()\r\n new_query['name'] = 'query' + str(query_id)\r\n new_query['content']['query'] = graph_query\r\n new_query['content']['size'] = query_size\r\n # Add text and query to the workbook\r\n category_id = category_dict[category]\r\n if args.verbose:\r\n print (\"DEBUG: Adding text and query to category ID {0}, workbook object name is {1}\".format(str(category_id), workbook['items'][category_id]['name']))\r\n new_new_text=json.loads(json.dumps(new_text.copy()))\r\n new_new_query=json.loads(json.dumps(new_query.copy()))\r\n workbook['items'][category_id]['content']['items'].append(new_new_text)\r\n workbook['items'][category_id]['content']['items'].append(new_new_query)\r\n\r\n # Dump the workbook to the output file or to console, if there was any query in the original checklist\r\n if query_id > 0:\r\n workbook_string = json.dumps(workbook, indent=4)\r\n if output_file:\r\n with open(output_file, 'w', encoding='utf-8') as f:\r\n f.write(workbook_string)\r\n f.close()\r\n else:\r\n print(workbook_string)\r\n else:\r\n print(\"INFO: sorry, the analyzed checklist did not contain any graph query\")\r\n\r\ndef get_output_file(checklist_file_or_url, is_file=True):\r\n if is_file:\r\n output_file = os.path.basename(checklist_file_or_url)\r\n else:\r\n output_file = checklist_file_or_url.split('/')[-1]\r\n if args.output_file:\r\n return args.output_file\r\n elif args.output_path:\r\n # Get filename without path and extension\r\n output_file = os.path.join(args.output_path, output_file)\r\n return os.path.splitext(output_file)[0] + '_workbook.json'\r\n else:\r\n output_file = None\r\n\r\n\r\n########\r\n# Main #\r\n########\r\n\r\n# First thing of all, load the building blocks\r\nload_building_blocks()\r\nif args.verbose:\r\n print (\"DEBUG: building blocks variables intialized:\")\r\n print (\"DEBUG: - Workbook: {0}\".format(str(block_workbook)))\r\n print (\"DEBUG: - Link: {0}\".format(str(block_link)))\r\n print (\"DEBUG: - Query: {0}\".format(str(block_query)))\r\n\r\n# Download checklist or process from local file\r\nif checklist_file:\r\n checklist_file_list = checklist_file.split(\" \")\r\n # If --only-english parameter was supplied, take only the English version and remove duplicates\r\n if args.only_english:\r\n checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list]\r\n checklist_file_list = list(set(checklist_file_list))\r\n if args.verbose:\r\n print(\"DEBUG: new checklist file list:\", str(checklist_file_list))\r\n # If --find-all paramater was supplied, find all the languages for the checklist\r\n if args.find_all:\r\n new_file_list = []\r\n for checklist_file in checklist_file_list:\r\n filedir = os.path.dirname(checklist_file)\r\n filebase = os.path.basename(checklist_file)\r\n filebase_noext = filebase[:-8] # Remove '.en.json'\r\n file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json'))\r\n for 
checklist_match in file_match_list:\r\n # new_file_list.append(os.path.join(filedir, checklist_match))\r\n new_file_list.append(checklist_match)\r\n checklist_file_list = list(set(new_file_list))\r\n if args.verbose:\r\n print(\"DEBUG: new checklist file list:\", str(checklist_file_list))\r\n # Go over the list(s)\r\n for checklist_file in checklist_file_list:\r\n if args.verbose:\r\n print(\"DEBUG: Opening checklist file\", checklist_file)\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Set output files\r\n output_file = get_output_file(checklist_file, is_file=True)\r\n # Generate workbook\r\n generate_workbook(output_file, checklist_data)\r\nelse:\r\n # If no input files specified, fetch the latest from Github...\r\n if technology:\r\n checklist_url = \"https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/\" + technology + \"_checklist.en.json\"\r\n else:\r\n checklist_url = \"https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json\"\r\n if args.verbose:\r\n print(\"DEBUG: Downloading checklist file from\", checklist_url)\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
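When no --checklist-file is supplied, the script builds a raw.githubusercontent.com URL from --technology (falling back to the landing-zone checklist, as shown further up). The actual download-and-parse step is not visible in this excerpt, so the requests.get()/response.json() calls below are an assumption about how the fetched JSON would be loaded.

# Hedged sketch of fetching the default checklist; the requests usage here
# is assumed, not copied from the script.
import requests

technology = "aks"  # illustrative value for --technology
if technology:
    checklist_url = ("https://raw.githubusercontent.com/Azure/review-checklists/"
                     "main/checklists/" + technology + "_checklist.en.json")
else:
    checklist_url = ("https://raw.githubusercontent.com/Azure/review-checklists/"
                     "main/checklists/lz_checklist.en.json")
response = requests.get(checklist_url)
if response.status_code == 200:
    checklist_data = response.json()
else:
    print("ERROR: unable to download checklist, status code", response.status_code)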
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
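Each checklist category becomes a workbook tab: a link whose subTarget is 'categoryN', plus an item group that is only visible when that same value is selected and whose first item is a markdown title. A simplified standalone sketch of that pair; the literal dictionaries are stand-ins for the real block_link/block_itemgroup building blocks, and make_category_tab is an illustrative name.

# Simplified illustration of the per-category link/section pair; stand-in
# dicts, not the actual building-block structure.
import uuid

def make_category_tab(category_id, category_title):
    link = {
        "id": str(uuid.uuid4()),            # random GUID, as in the script
        "linkLabel": category_title,
        "subTarget": "category" + str(category_id),
        "preText": category_title,
    }
    section = {
        "name": "category" + str(category_id),
        "conditionalVisibility": {"value": "category" + str(category_id)},
        "content": {"items": [{"name": "category" + str(category_id) + "title",
                               "content": {"json": "## " + category_title}}]},
    }
    return link, section

# link, section = make_category_tab(1, "Identity and Access Management")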
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
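For every checklist item that carries a 'graph' query, the script emits a text block (the recommendation, optionally extended with documentation and training links) followed by a query block whose size comes from the query_size constant (4 = tiny). A simplified sketch of that pairing; the dictionaries and make_item_blocks are stand-ins, not the real block_text/block_query structures.

# Simplified illustration of the per-item text + query blocks. Stand-in dicts.
QUERY_SIZE = 4  # 0: medium, 1: small, 4: tiny (per the script's comment)

def make_item_blocks(query_id, item):
    text = item.get("text", "")
    if item.get("link"):
        text += ". Check [this link](" + item["link"] + ") for further information."
    if item.get("training"):
        text += ". [This training](" + item["training"] + ") can help to educate yourself on this."
    text_block = {"name": "querytext" + str(query_id), "content": {"json": text}}
    query_block = {"name": "query" + str(query_id),
                   "content": {"query": item.get("graph"), "size": QUERY_SIZE}}
    return text_block, query_block

# make_item_blocks(1, {"text": "Use AAD", "graph": "Resources | count",
#                      "link": "https://learn.microsoft.com"})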
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the 
Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 
'block_query.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a 
JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query 
building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review 
Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n 
print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# 
Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n 
help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = 
argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')
parser.add_argument('--checklist-file', dest='checklist_file', action='store',
                    help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')
parser.add_argument('--only-english', dest='only_english', action='store_true', default=False,
                    help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')
parser.add_argument('--find-all', dest='find_all', action='store_true', default=False,
                    help='if checklist files are specified, find all the languages for the given checklists (default: False)')
parser.add_argument('--technology', dest='technology', action='store',
                    help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of the JSON file where the generated workbook will be stored, otherwise it is printed to the console')
parser.add_argument('--output-path', dest='output_path', action='store',
                    help='Folder where to store the results (using the same name as the input_file)')
parser.add_argument('--blocks-path', dest='blocks_path', action='store',
                    help='Folder where the building blocks to build the workbook are stored')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()
checklist_file = args.checklist_file
technology = args.technology

block_workbook = None
block_link = None
block_section = None
block_query = None
block_text = None

query_size = 4  # 0: medium, 1: small, 4: tiny

# Workbook building blocks
def load_building_blocks():

    # Define the blocks as global variables
    global block_workbook
    global block_link
    global block_section
    global block_query
    global block_text

    # Set folder where to load from
    if args.blocks_path:
        blocks_path = args.blocks_path
        if args.verbose:
            print("DEBUG: Setting building block folder to {0}".format(blocks_path))
    else:
        print("ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.")
        sys.exit(1)

    # Load initial workbook building block
    block_file = os.path.join(blocks_path, 'block_workbook.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_workbook = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load link building block
    block_file = os.path.join(blocks_path, 'block_link.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_link = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load itemgroup (aka section) building block
    block_file = os.path.join(blocks_path, 'block_itemgroup.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_section = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load query building block
    block_file = os.path.join(blocks_path, 'block_query.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_query = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)
    # Load text building block
    block_file = os.path.join(blocks_path, 'block_text.json')
    if args.verbose:
        print("DEBUG: Loading file {0}...".format(block_file))
    try:
        with open(block_file) as f:
            block_text = json.load(f)
    except Exception as e:
        print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e))
        sys.exit(0)

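# Note on the building blocks (inferred from how generate_workbook() uses them
# below; the real block_*.json files are not included in this dump, so their
# exact shape is an assumption): block_link needs at least 'id', 'linkLabel',
# 'subTarget' and 'preText'; block_query needs 'name' and a 'content' object
# with 'query' and 'size'; block_text needs 'name' and 'content.json'; and
# block_section needs 'name', 'conditionalVisibility.value' and a
# 'content.items' list whose first element is a text item. For example, a
# minimal hypothetical block_text.json could look like:
#
#   { "name": "", "content": { "json": "" } }
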
# Main function to generate the workbook JSON
def generate_workbook(output_file, checklist_data):

    # Initialize an empty workbook
    workbook = block_workbook

    # Generate one tab in the workbook for each category
    category_id = 0
    query_id = 0
    category_dict = {}
    for item in checklist_data.get("categories"):
        category_title = item.get("name")
        category_id += 1
        category_dict[category_title] = category_id + 1  # We will use this dict later to know where to put each query
        # Create new link
        new_link = block_link.copy()
        new_link['id'] = str(uuid.uuid4())  # RANDOM GUID
        new_link['linkLabel'] = category_title
        new_link['subTarget'] = 'category' + str(category_id)
        new_link['preText'] = category_title
        # Create new section
        new_section = block_section.copy()
        new_section['name'] = 'category' + str(category_id)
        new_section['conditionalVisibility']['value'] = 'category' + str(category_id)
        new_section['content']['items'][0]['content']['json'] = "## " + category_title
        new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'
        # Add link and query to workbook
        # if args.verbose:
        #     print()
        #     print("DEBUG: Adding link: {0}".format(json.dumps(new_link)))
        #     print("DEBUG: Adding section: {0}".format(json.dumps(new_section)))
        #     print("DEBUG: Workbook so far: {0}".format(json.dumps(workbook)))
        workbook['items'][1]['content']['links'].append(new_link.copy())  # I am getting crazy with Python variable references :(
        # Add section to workbook
        new_new_section = json.loads(json.dumps(new_section.copy()))
        workbook['items'].append(new_new_section)

    # For each checklist item, add a row to spreadsheet
    for item in checklist_data.get("items"):
        # Read variables from JSON
        guid = item.get("guid")
        category = item.get("category")
        subcategory = item.get("subcategory")
        text = item.get("text")
        description = item.get("description")
        severity = item.get("severity")
        link = item.get("link")
        training = item.get("training")
        graph_query = item.get("graph")
        if graph_query:
            query_id += 1
            # Create new text
            new_text = block_text.copy()
            new_text['name'] = 'querytext' + str(query_id)
            new_text['content']['json'] = text
            if link:
                new_text['content']['json'] += ". Check [this link](" + link + ") for further information."
            if training:
                new_text['content']['json'] += ". [This training](" + training + ") can help to educate yourself on this."
            # Create new query
            new_query = block_query.copy()
            new_query['name'] = 'query' + str(query_id)
            new_query['content']['query'] = graph_query
            new_query['content']['size'] = query_size
            # Add text and query to the workbook
            category_id = category_dict[category]
            if args.verbose:
                print("DEBUG: Adding text and query to category ID {0}, workbook object name is {1}".format(str(category_id), workbook['items'][category_id]['name']))
            new_new_text = json.loads(json.dumps(new_text.copy()))
            new_new_query = json.loads(json.dumps(new_query.copy()))
            workbook['items'][category_id]['content']['items'].append(new_new_text)
            workbook['items'][category_id]['content']['items'].append(new_new_query)

    # Dump the workbook to the output file or to console, if there was any query in the original checklist
    if query_id > 0:
        workbook_string = json.dumps(workbook, indent=4)
        if output_file:
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(workbook_string)
        else:
            print(workbook_string)
    else:
        print("INFO: sorry, the analyzed checklist did not contain any graph query")

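# The json.loads(json.dumps(...)) round-trips above act as a deep copy, so
# that each appended section/query is independent of the shared building-block
# templates. An equivalent alternative, assuming the blocks stay
# JSON-serializable, would be:
#
#   import copy
#   new_new_section = copy.deepcopy(new_section)
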
def get_output_file(checklist_file_or_url, is_file=True):
    if is_file:
        output_file = os.path.basename(checklist_file_or_url)
    else:
        output_file = checklist_file_or_url.split('/')[-1]
    if args.output_file:
        return args.output_file
    elif args.output_path:
        # Get filename without path and extension
        output_file = os.path.join(args.output_path, output_file)
        return os.path.splitext(output_file)[0] + '_workbook.json'
    else:
        output_file = None

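# Example invocation (hypothetical file and folder names, shown only to
# illustrate the flags defined above; --blocks-path is the only argument
# load_building_blocks() strictly requires):
#
#   python3 <this_script>.py \
#       --checklist-file ./checklists/aks_checklist.en.json \
#       --blocks-path ./workbook_blocks \
#       --output-path ./output \
#       --verbose
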
########
# Main #
########

# First of all, load the building blocks
load_building_blocks()
if args.verbose:
    print("DEBUG: building block variables initialized:")
    print("DEBUG: - Workbook: {0}".format(str(block_workbook)))
    print("DEBUG: - Link: {0}".format(str(block_link)))
    print("DEBUG: - Query: {0}".format(str(block_query)))

# Download checklist or process from local file
if checklist_file:
    checklist_file_list = checklist_file.split(" ")
    # If --only-english parameter was supplied, take only the English version and remove duplicates
    if args.only_english:
        checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list]
        checklist_file_list = list(set(checklist_file_list))
        if args.verbose:
            print("DEBUG: new checklist file list:", str(checklist_file_list))
    # If --find-all parameter was supplied, find all the languages for the checklist
    if args.find_all:
        new_file_list = []
        for checklist_file in checklist_file_list:
            filedir = os.path.dirname(checklist_file)
            filebase = os.path.basename(checklist_file)
            filebase_noext = filebase[:-8]  # Remove '.en.json'
            file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json'))
            for checklist_match in file_match_list:
                # new_file_list.append(os.path.join(filedir, checklist_match))
                new_file_list.append(checklist_match)
        checklist_file_list = list(set(new_file_list))
        if args.verbose:
            print("DEBUG: new checklist file list:", str(checklist_file_list))
    # Go over the list(s)
    for checklist_file in checklist_file_list:
        if args.verbose:
            print("DEBUG: Opening checklist file", checklist_file)
        # Get JSON
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n 
if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r\n new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r\n new_section['content']['items'][0]['name'] = 'category' + str(category_id) + 'title'\r\n # Add link and query to workbook\r\n # if args.verbose:\r\n # print()\r\n # print (\"DEBUG: Adding link: {0}\".format(json.dumps(new_link)))\r\n # print (\"DEBUG: Adding section: {0}\".format(json.dumps(new_section)))\r\n # print(\"DEBUG: Workbook so far: {0}\".format(json.dumps(workbook)))\r\n workbook['items'][1]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :(\r\n # Add section to workbook\r\n new_new_section=json.loads(json.dumps(new_section.copy()))\r\n workbook['items'].append(new_new_section)\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n graph_query = item.get(\"graph\")\r\n if graph_query:\r\n query_id += 1\r\n # Create new text\r\n new_text = block_text.copy()\r\n new_text['name'] = 'querytext' + str(query_id)\r\n new_text['content']['json'] = text\r\n if link:\r\n new_text['content']['json'] += \". Check [this link](\" + link + \") for further information.\"\r\n if training:\r\n new_text['content']['json'] += \". 
[This training](\" + training + \") can help to educate yourself on this.\"\r\n # Create new query\r\n new_query = block_query.copy()\r\n new_query['name'] = 'query' + str(query_id)\r\n new_query['content']['query'] = graph_query\r\n new_query['content']['size'] = query_size\r\n # Add text and query to the workbook\r\n category_id = category_dict[category]\r\n if args.verbose:\r\n print (\"DEBUG: Adding text and query to category ID {0}, workbook object name is {1}\".format(str(category_id), workbook['items'][category_id]['name']))\r\n new_new_text=json.loads(json.dumps(new_text.copy()))\r\n new_new_query=json.loads(json.dumps(new_query.copy()))\r\n workbook['items'][category_id]['content']['items'].append(new_new_text)\r\n workbook['items'][category_id]['content']['items'].append(new_new_query)\r\n\r\n # Dump the workbook to the output file or to console, if there was any query in the original checklist\r\n if query_id > 0:\r\n workbook_string = json.dumps(workbook, indent=4)\r", "type": "random" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r", "type": "random" }, { "content": "######################################################################\r\n#\r\n# This script reads the checklist items from the latest checklist file\r\n# in Github (or from a local file) and generates an Azure Monitor\r\n# workbook in JSON format.\r\n# \r\n# Last updated: February 2023\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport 
sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport uuid\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist')\r\nparser.add_argument('--checklist-file', dest='checklist_file', action='store',\r\n help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github')\r\nparser.add_argument('--only-english', dest='only_english', action='store_true', default=False,\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--find-all', dest='find_all', action='store_true', default=False,\r\n help='if checklist files are specified, find all the languages for the given checklists (default: False)')\r\nparser.add_argument('--technology', dest='technology', action='store',\r\n help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github')\r\nparser.add_argument('--output-file', dest='output_file', action='store',\r\n help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place')\r\nparser.add_argument('--output-path', dest='output_path', action='store',\r\n help='Folder where to store the results (using the same name as the input_file)')\r\nparser.add_argument('--blocks-path', dest='blocks_path', action='store',\r\n help='Folder where the building blocks to build the workbook are stored)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\nchecklist_file = args.checklist_file\r\ntechnology = args.technology\r\n\r\nblock_workbook = None\r\nblock_link = None\r\nblock_section = None\r\nblock_query = None\r\nblock_text = None\r\n\r\nquery_size = 4 # 0: medium, 1: small, 4: tiny\r\n\r\n# Workbook building blocks\r\ndef load_building_blocks():\r\n\r\n # Define the blocks as global variables\r\n global block_workbook\r\n global block_link\r\n global block_section\r\n global block_query\r\n global block_text\r\n\r\n # Set folder where to load from\r\n if args.blocks_path:\r\n blocks_path = args.blocks_path\r\n if args.verbose:\r\n print (\"DEBUG: Setting building block folder to {0}\".format(blocks_path))\r\n else:\r\n print(\"ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.\")\r\n sys.exit(1)\r\n\r\n # Load initial workbook building block\r\n block_file = os.path.join(blocks_path, 'block_workbook.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_workbook = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load link building block\r\n block_file = os.path.join(blocks_path, 'block_link.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_link = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load itemgroup (aka section) building block\r\n block_file = os.path.join(blocks_path, 
'block_itemgroup.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_section = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load query building block\r\n block_file = os.path.join(blocks_path, 'block_query.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_query = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n # Load text building block\r\n block_file = os.path.join(blocks_path, 'block_text.json')\r\n if args.verbose:\r\n print (\"DEBUG: Loading file {0}...\".format(block_file))\r\n try:\r\n with open(block_file) as f:\r\n block_text = json.load(f)\r\n except Exception as e:\r\n print(\"ERROR: Error when opening JSON workbook building block\", block_file, \"-\", str(e))\r\n sys.exit(0)\r\n\r\n# Main function to generate the workbook JSON\r\ndef generate_workbook(output_file, checklist_data):\r\n\r\n # Initialize an empty workbook\r\n workbook = block_workbook\r\n\r\n # Generate one tab in the workbook for each category\r\n category_id = 0\r\n query_id = 0\r\n category_dict = {}\r\n for item in checklist_data.get(\"categories\"):\r\n category_title = item.get(\"name\")\r\n category_id += 1\r\n category_dict[category_title] = category_id + 1 # We will use this dict later to know where to put each query\r\n # Create new link\r\n new_link = block_link.copy()\r\n new_link['id'] = str(uuid.uuid4()) # RANDOM GUID\r\n new_link['linkLabel'] = category_title\r\n new_link['subTarget'] = 'category' + str(category_id)\r\n new_link['preText'] = category_title\r\n # Create new section\r\n new_section = block_section.copy()\r\n new_section['name'] = 'category' + str(category_id)\r\n new_section['conditionalVisibility']['value'] = 'category' + str(category_id)\r", "type": "random" } ]
[ "load_building_blocks()\r", " output_file = get_output_file(checklist_url, is_file=False)\r", " generate_workbook(output_file, checklist_data)\r", " output_file = get_output_file(checklist_file, is_file=True)\r", " for item in checklist_data.get(\"categories\"):\r", " category_title = item.get(\"name\")\r", " checklist_data = json.load(f)\r", " response = requests.get(checklist_url)\r", " for item in checklist_data.get(\"items\"):\r", " guid = item.get(\"guid\")\r", " category = item.get(\"category\")\r", " subcategory = item.get(\"subcategory\")\r", " text = item.get(\"text\")\r", " description = item.get(\"description\")\r", " severity = item.get(\"severity\")\r", " link = item.get(\"link\")\r", " training = item.get(\"training\")\r", " graph_query = item.get(\"graph\")\r", " block_workbook = json.load(f)\r", " block_link = json.load(f)\r", " block_section = json.load(f)\r", " block_query = json.load(f)\r", " block_text = json.load(f)\r", " print (\"DEBUG: Loading file {0}...\".format(block_file))\r", " print(\"DEBUG: new checklist file list:\", str(checklist_file_list))\r", " print (\"DEBUG: - Query: {0}\".format(str(block_query)))\r", "\r", " try:\r", "block_workbook = None\r", " if output_file:\r", " block_file = os.path.join(blocks_path, 'block_itemgroup.json')\r", " new_section['content']['items'][0]['content']['json'] = \"## \" + category_title\r" ]
METASEP
56
azure__review-checklists
azure__review-checklists METASEP web/flaskmysql/app.py METASEP #app.py from flask import Flask, request, render_template, jsonify from flaskext.mysql import MySQL #pip install flask-mysql import pymysql import os app = Flask(__name__) # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Open connection mysql = MySQL() app.config['MYSQL_DATABASE_USER'] = mysql_server_username app.config['MYSQL_DATABASE_PASSWORD'] = mysql_server_password app.config['MYSQL_DATABASE_DB'] = 'checklist' app.config['MYSQL_DATABASE_HOST'] = mysql_server_fqdn mysql.init_app(app) @app.route('/') def home(): app.logger.info("DEBUG: Connecting to database...") try: category_filter = request.args.get('category', None) status_filter = request.args.get('status', None) severity_filter = request.args.get('severity', None) except Exception as e: app.logger.info("ERROR reading query parameters for filters: {0}".format(str(e))) pass try: conn = mysql.connect() cursor = conn.cursor(pymysql.cursors.DictCursor) except Exception as e: app.logger.info("ERROR opening cursor to DB connection: {0}".format(str(e))) return jsonify(str(e)) try: sqlquery = "SELECT * from items" filter1_added = False # category filter if category_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "category = '{0}'".format(category_filter) # status filter if status_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "status = '{0}'".format(status_filter) # severity filter if severity_filter: if filter1_added: sqlquery += " AND " else: sqlquery += " WHERE " filter1_added = True sqlquery += "severity = '{0}'".format(severity_filter) # send queries app.logger.info ("Retrieving checklist items with query '{0}'".format(sqlquery)) cursor.execute(sqlquery) itemslist = cursor.fetchall() cursor.execute("SELECT DISTINCT category FROM items") categorylist = cursor.fetchall() cursor.execute("SELECT DISTINCT severity FROM items") severitylist = cursor.fetchall() cursor.execute("SELECT DISTINCT status FROM items") statuslist = cursor.fetchall() return render_template('index.html', itemslist=itemslist, categorylist=categorylist, severitylist=severitylist, statuslist=statuslist) except Exception as e: app.logger.info("ERROR sending query: {0}".format(str(e))) return jsonify(str(e)) @app.route("/update",methods=["POST","GET"]) def update(): app.logger.info("Processing {0} with request.form {1}".format(str(request.method), str(request.form))) try: conn = mysql.connect() cursor = conn.cursor(pymysql.cursors.DictCursor) if request.method == 'POST': field = request.form['field'] value = request.form['value'] editid = request.form['id'] app.logger.info("Processing POST for field '{0}', editid '{1}' and value 
'{2}'".format(field, value, editid)) if field == 'comment' and value != '': sql = "UPDATE items SET comments=%s WHERE guid=%s" data = (value, editid) conn = mysql.connect() cursor = conn.cursor() app.logger.info ("Sending SQL query '{0}' with data '{1}'".format(sql, str(data))) cursor.execute(sql, data) conn.commit() elif field == 'status' and value != '': sql = "UPDATE items SET status=%s WHERE guid=%s" data = (value, editid) conn = mysql.connect() cursor = conn.cursor() app.logger.info ("Sending SQL query '{0}' with data '{1}'".format(sql, str(data))) cursor.execute(sql, data) conn.commit() else: app.logger.info ("Field is '{0}', value is '{1}': not doing anything".format(field, value)) success = 1 return jsonify(success) except Exception as e: app.logger.info("Oh oh, there is an error: {0}".format(str(e))) success = 0 return jsonify(success) finally: cursor.close() conn.close() if __name__ == "__main__": app.run(host='0.0.0.0', debug=True) web/fillgraphdb/graph_db.py METASEP import os import sys import pymysql import json import time import requests import azure.mgmt.resourcegraph as arg from datetime import datetime from azure.mgmt.resource import SubscriptionClient from azure.identity import AzureCliCredential from azure.identity import DefaultAzureCredential from azure.identity import ClientSecretCredential # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes (this_value): return str(this_value).replace("'", "\\'") # Function to send an Azure Resource Graph query def get_resources (graph_query, argClient, subsList, argQueryOptions): # TO DO: Authentication should probably happen outside of this function try: # Create query argQuery = arg.models.QueryRequest(subscriptions=subsList, query=graph_query, options=argQueryOptions) # Run query and return results argResults = argClient.resources(argQuery) print("DEBUG: query results: {0}".format(str(argResults))) return argResults except Exception as e: print("ERROR: Error sending Azure Resource Graph query to Azure: {0}".format(str(e))) # sys.exit(0) # Debugging.... Probably this should be exit(1) return '' # Wait for IMDS endpoint to be available try: wait_max_intervals = int(os.environ.get("WAIT_INTERVALS")) print ("DEBUG: WAIT_INTERVALS read from environment variable: {0}".format(str(wait_max_intervals))) except: wait_max_intervals = 5 print ("DEBUG: WAIT_INTERVALS set to default value: {0}".format(str(wait_max_intervals))) wait_interval = 10.0 imds_url = 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/' imds_headers = { "Metadata" : "true" } imds_tries = 0 break_loop = False print ('DEBUG: Going into waiting loop to make sure the metadata endpoint is active...') while not break_loop: imds_tries += 1 print ("DEBUG: We are in the loop, pass {0}/{1} ({2}). 
Trying the IMDS endpoint...".format(str(imds_tries), str(wait_max_intervals), str(datetime.now()))) if imds_tries > wait_max_intervals: print("ERROR: max wait intervals exceeded when waiting for IMDS to answer, hopefully you specified some SP credentials as SP variables...") break_loop = True else: print ("DEBUG: Sending GET request to {0}...".format(imds_url)) try: imds_response = requests.get(imds_url, headers=imds_headers, timeout=1) if imds_response.status_code >= 200 and imds_response.status_code <= 299: print ("DEBUG: IMDS endpoint seems to be working, received status code {0} and answer {1}".format(str(imds_response.status_code), str(imds_response.text))) break_loop = True else: print ("DEBUG: IMDS endpoint doesnt seem to be working, received status code {0} and answer {1}".format(str(imds_response.status_code), str(imds_response.text))) except Exception as e: print("DEBUG: Error sending request to IMDS endpoint: {0}".format(str(e))) pass if not break_loop: print("DEBUG: Going to sleep {0} seconds before next try...".format(str(wait_interval))) time.sleep (wait_interval) # Authenticate to Azure, either with Managed Identity or SP print('DEBUG: Authenticating to Azure...') try: print('DEBUG: Getting environment variables...') # credential = AzureCliCredential() # Get your credentials from Azure CLI (development only!) and get your subscription list tenant_id = os.environ.get("AZURE_TENANT_ID") client_id = os.environ.get("AZURE_CLIENT_ID") client_secret = os.environ.get("AZURE_CLIENT_SECRET") except Exception as e: print("ERROR: Error getting environment variables: {0}".format(str(e))) tenant_id = None client_id = None client_secret = None pass try: if tenant_id and client_id and client_secret: print("DEBUG: Service principal credentials (client ID {0}, tenant ID {1}) retrieved from environment variables, trying SP-based authentication now...".format(str(client_id), str(tenant_id))) credential = ClientSecretCredential(tenant_id=tenant_id, client_id=client_id, client_secret=client_secret) else: print('DEBUG: Service principal credentials could not be retrieved from environment variables, trying default authentication method with Managed Identity...') credential = DefaultAzureCredential() # Managed identity except Exception as e: print("ERROR: Error during Azure Authentication: {0}".format(str(e))) sys.exit(1) try: print('DEBUG: Getting subscriptions...') subsClient = SubscriptionClient(credential) subsRaw = [] for sub in subsClient.subscriptions.list(): subsRaw.append(sub.as_dict()) subsList = [] for sub in subsRaw: subsList.append(sub.get('subscription_id')) print ("DEBUG: provided credentials give access to {0} subscription(s)".format(str(len(subsList)))) # Create Azure Resource Graph client and set options print('DEBUG: Creating client object...') argClient = arg.ResourceGraphClient(credential) argQueryOptions = arg.models.QueryRequestOptions(result_format="objectArray") except Exception as e: print("ERROR: Error creating resource graph client object: {0}".format(str(e))) sys.exit(1) # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) else: print("DEBUG: mysql FQDN retrieved from environment variables: '{0}'".format(mysql_server_fqdn)) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please 
define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) else: print("DEBUG: mysql authentication username retrieved from environment variables: '{0}'".format(mysql_server_username)) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) else: print("DEBUG: mysql authentication password retrieved from environment variables: {0}".format("********")) # Create connection to MySQL server and number of records print ("DEBUG: Connecting to '{0}' with username '{1}'...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password) sql_query = "SELECT * FROM {0} WHERE graph_query_success IS NOT null AND graph_query_failure IS NOT null AND graph_query_success != 'None' AND graph_query_failure != 'None';".format (mysql_db_table) cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() row_cnt = 0 if len(rows) > 0: for row in rows: row_cnt += 1 result_text = '' item_guid = row[0] item_success_query = row[10] item_failure_query = row[11] # print ("DEBUG {0}: '{1}', '{2}'".format(item_guid, item_success_query, item_failure_query)) success_resources = str(get_resources(item_success_query, argClient, subsList, argQueryOptions)).replace("'", '"') success_resources = success_resources.replace(': None', ': "None"') # print ("DEBUG: SUCCESS QUERY: {0}".format(success_resources)) if success_resources: try: success_resources_object = json.loads(success_resources) except: print("ERROR: JSON returned from Azure Graph Query not valid: {0}".format(success_resources)) for resource in success_resources_object['data']: if result_text: result_text += '\n' result_text += "SUCCESS: {0}".format(resource["id"]) failure_resources = str(get_resources(item_failure_query, argClient, subsList, argQueryOptions)).replace("'", '"') failure_resources = failure_resources.replace(': None', ': "None"') # print ("DEBUG: FAILURE QUERY: {0}".format(failure_resources)) if failure_resources: try: failure_resources_object = json.loads(failure_resources) except: print("ERROR: JSON returned from Azure Graph Query not valid: {0}".format(failure_resources)) for resource in failure_resources_object['data']: if result_text: result_text += '\n' result_text += "FAILURE: {0}".format(resource["id"]) # print ("DEBUG: Result summary: \n{0}".format(result_text)) if result_text: update_query = "UPDATE items SET graph_query_result = '{0}' WHERE guid = '{1}';".format(result_text, item_guid) print ("DEBUG: sending SQL query '{0}'".format(update_query)) try: cursor.execute(update_query) db.commit() except Exception as e: print("ERROR: Error sending SQL query to MySql server: {0}".format(str(e))) pass else: print("DEBUG: No results could be retrieved for the success and failure queries of checklist item {0}".format(item_guid)) else: row_count = 0 print ("INFO: Processed table {0} in database {1} with {2} records with graph queries. 
Happy review!".format(mysql_db_table, mysql_db_name, str(row_cnt))) # Bye db.close() web/filldb/fill_db.py METASEP import requests import json import os import sys import pymysql # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes(this_value): return str(this_value).replace("'", "\\'") # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("ERROR: Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("ERROR: Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("ERROR: Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Create connection to MySQL server and get version print ("INFO: Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, passwd = mysql_server_password) sql_query = "SELECT VERSION();" cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() data = "" if len(rows) > 0: for row in rows: if len(data) > 0: data += ', ' data += str(''.join(row)) print ("INFO: Connected to MySQL server {0} with version {1}".format(mysql_server_fqdn, data)) # Delete db if existed sql_query = "DROP DATABASE IF EXISTS {0};".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Create database sql_query = "CREATE DATABASE IF NOT EXISTS {0};".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() sql_query = "USE {0}".format(mysql_db_name) # print ("Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Create table sql_query = """CREATE TABLE {0} ( guid varchar(40), text varchar(1024), description varchar(1024), link varchar(255), training varchar(255), comments varchar(1024), severity varchar(10), status varchar(15), category varchar(255), subcategory varchar(255), graph_query_success varchar(1024), graph_query_failure varchar(1024), graph_query_result varchar(4096) );""".format(mysql_db_table) # print ("DEBUG: Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() # Download checklist technology = os.environ.get("CHECKLIST_TECHNOLOGY") if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" response = requests.get(checklist_url) # If download was successful if response.status_code == 200: print ("INFO: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_object = json.loads(response.text) except 
Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Get default status from the JSON, default to "Not verified" try: status_list = checklist_object.get("status") default_status = status_list[0].get("name") except: default_status = "Not verified" pass # For each checklist item, add a row to mysql DB row_counter = 0 for item in checklist_object.get("items"): guid = item.get("guid") category = item.get("category") subcategory = item.get("subcategory") text = escape_quotes(item.get("text")) description = escape_quotes(item.get("description")) severity = item.get("severity") link = item.get("link") training = item.get("training") status = default_status graph_query_success = escape_quotes(item.get("graph_success")) graph_query_failure = escape_quotes(item.get("graph_failure")) # print("DEBUG: Adding to table {0}: '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}'".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid)) sql_query = """INSERT INTO {0} (category,subcategory,text,description,severity,link,training,graph_query_success,graph_query_failure,guid,status) VALUES ('{1}','{2}','{3}','{4}','{5}', '{6}','{7}','{8}','{9}','{10}', '{11}');""".format(mysql_db_table, category, subcategory, text, description, severity, link, training, graph_query_success, graph_query_failure, guid, status) # print ("DEBUG: Sending query: {0}".format(sql_query)) cursor.execute(sql_query) db.commit() row_counter += 1 else: print ("Error downloading {0}".format(checklist_url)) # Bye print("INFO: {0} rows added to database.".format(str(row_counter))) db.close() web/filldb/check_db.py METASEP import os import sys import pymysql # Database and table name mysql_db_name = "checklist" mysql_db_table = "items" use_ssl = "yes" # Format a string to be included in a SQL query as value def escape_quotes(this_value): return str(this_value).replace("'", "\\'") # Get database credentials from environment variables mysql_server_fqdn = os.environ.get("MYSQL_FQDN") if mysql_server_fqdn == None: print("Please define an environment variable 'MYSQL_FQDN' with the FQDN of the MySQL server") sys.exit(1) mysql_server_name = mysql_server_fqdn.split('.')[0] mysql_server_username = os.environ.get("MYSQL_USER") if mysql_server_username == None: print("Please define an environment variable 'MYSQL_USER' with the FQDN of the MySQL username") sys.exit(1) if not mysql_server_username.__contains__('@'): mysql_server_username += '@' + mysql_server_name mysql_server_password = os.environ.get("MYSQL_PASSWORD") if mysql_server_password == None: print("Please define an environment variable 'MYSQL_PASSWORD' with the FQDN of the MySQL password") sys.exit(1) # Create connection to MySQL server and number of records print ("Connecting to {0} with username {1}...".format(mysql_server_fqdn, mysql_server_username)) if use_ssl == 'yes': db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password, ssl = {'ssl':{'ca': 'BaltimoreCyberTrustRoot.crt.pem'}}) else: db = pymysql.connect(host=mysql_server_fqdn, user = mysql_server_username, database = mysql_db_name, passwd = mysql_server_password) sql_query = "SELECT COUNT(*) FROM {0};".format (mysql_db_table) cursor = db.cursor() cursor.execute(sql_query) rows = cursor.fetchall() if len(rows) > 0: row_count = rows[0][0] else: row_count = 0 print ("Table {0} in database {1} contains {2} 
records".format(mysql_db_table, mysql_db_name, str(row_count))) # Bye db.close() scripts/workbook_create.py METASEP ###################################################################### # # This script reads the checklist items from the latest checklist file # in Github (or from a local file) and generates an Azure Monitor # workbook in JSON format. # # Last updated: February 2023 # ###################################################################### import json import argparse import sys import os import requests import glob import uuid # Get input arguments parser = argparse.ArgumentParser(description='Generate Azure Monitor workbook from Azure Review Checklist') parser.add_argument('--checklist-file', dest='checklist_file', action='store', help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. Otherwise it will take the latest file from Github') parser.add_argument('--only-english', dest='only_english', action='store_true', default=False, help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)') parser.add_argument('--find-all', dest='find_all', action='store_true', default=False, help='if checklist files are specified, find all the languages for the given checklists (default: False)') parser.add_argument('--technology', dest='technology', action='store', help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github') parser.add_argument('--output-file', dest='output_file', action='store', help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place') parser.add_argument('--output-path', dest='output_path', action='store', help='Folder where to store the results (using the same name as the input_file)') parser.add_argument('--blocks-path', dest='blocks_path', action='store', help='Folder where the building blocks to build the workbook are stored)') parser.add_argument('--create-arm-template', dest='create_arm_template', action='store_true', default=False, help='create an ARM template, additionally to the workbook JSON (default: False)') parser.add_argument('--category', dest='category', action='store', help='if the workbook should be restricted to a category containing the specified string') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() checklist_file = args.checklist_file technology = args.technology block_workbook = None block_link = None block_section = None block_query = None block_text = None query_size = 4 # 0: medium, 1: small, 4: tiny # Workbook building blocks def load_building_blocks(): # Define the blocks as global variables global block_workbook global block_link global block_section global block_query global block_text global block_arm # Set folder where to load from if args.blocks_path: blocks_path = args.blocks_path if args.verbose: print ("DEBUG: Setting building block folder to {0}".format(blocks_path)) else: print("ERROR: please use the argument --blocks-path to specify the location of the workbook building blocks.") sys.exit(1) # Load initial workbook building block block_file = os.path.join(blocks_path, 'block_workbook.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_workbook = json.load(f) except 
Exception as e: print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e)) sys.exit(0) # Load link building block block_file = os.path.join(blocks_path, 'block_link.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_link = json.load(f) except Exception as e: print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e)) sys.exit(0) # Load itemgroup (aka section) building block block_file = os.path.join(blocks_path, 'block_itemgroup.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_section = json.load(f) except Exception as e: print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e)) sys.exit(0) # Load query building block block_file = os.path.join(blocks_path, 'block_query.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_query = json.load(f) except Exception as e: print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e)) sys.exit(0) # Load text building block block_file = os.path.join(blocks_path, 'block_text.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_text = json.load(f) except Exception as e: print("ERROR: Error when opening JSON workbook building block", block_file, "-", str(e)) sys.exit(0) # Load ARM template building block block_file = os.path.join(blocks_path, 'block_arm.json') if args.verbose: print ("DEBUG: Loading file {0}...".format(block_file)) try: with open(block_file) as f: block_arm = json.load(f) except Exception as e: print("ERROR: Error when opening JSON ARM template building block", block_file, "-", str(e)) sys.exit(0) # Function that corrects format issues in the queries stored in JSON def fix_query_format(query_string): if query_string: query_string = str(query_string).replace('\\\\', '\\') # Replace a double escaping inverted bar ('\\\\') through a single one ('\') return query_string else: return None # Function that transforms a JSON string to be included in an ARM template def serialize_data(workbook_string): if workbook_string: # Escape double quotes workbook_string = str(workbook_string).replace('"', '\"') # Escape escape characters # workbook_string = str(workbook_string).replace('\\', '\\\\') # Undo the scaping for the newline character (otherwise the markdown in the workbook would look wrong). # Note that this might impact newline characters in queries! # workbook_string = str(workbook_string).replace('\\\\n', '\\n') return workbook_string else: return None # Main function to generate the workbook JSON def generate_workbook(output_file, checklist_data): # Initialize an empty workbook workbook = json.loads(json.dumps(block_workbook)) workbook_title = "## " + checklist_data['metadata']['name'] if args.category: workbook_title += ' - ' + args.category[0].upper() + args.category[1:] workbook_title += "\n---\n\nThis workbook has been automatically generated out of the checklists in the [Azure Review Checklists repo](https://github.com/Azure/review-checklists)." 
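    # The first item of the workbook building block is assumed to be a markdown text step;
    # the title composed above is written into its 'json' content next.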
workbook['items'][0]['content']['json'] = workbook_title # Decide whether we will match in the category, or subcategory, and update the corresponding variables if args.category: if args.verbose: print("DEBUG: creating tab list with subcategories list for categories containing the term {0}...".format(args.category)) tab_name_field = 'subcategory' tab_title_list = [x["subcategory"] for x in checklist_data.get("items") if (args.category.lower() in str(x["category"]).lower())] tab_title_list = list(set(tab_title_list)) else: if args.verbose: print("DEBUG: creating tab list with categories...") tab_name_field = 'category' tab_title_list = [x["name"] for x in checklist_data.get("categories")] if args.verbose: print("DEBUG: created tab list: {0}".format(str(tab_title_list))) # Generate one tab in the workbook for each category/subcategory tab_id = 0 query_id = 0 tab_dict = {} for tab_title in tab_title_list: tab_dict[tab_title] = tab_id # We will use this dict later to know where to put each query tab_id += 1 # Create new link new_link = block_link.copy() new_link['id'] = str(uuid.uuid4()) # RANDOM GUID new_link['linkLabel'] = tab_title new_link['subTarget'] = 'category' + str(tab_id) new_link['preText'] = tab_title # Create new section new_section = block_section.copy() new_section['name'] = 'category' + str(tab_id) new_section['conditionalVisibility']['value'] = 'category' + str(tab_id) new_section['content']['items'][0]['content']['json'] = "## " + tab_title new_section['content']['items'][0]['name'] = 'category' + str(tab_id) + 'title' # Add link and query to workbook # if args.verbose: # print() # print ("DEBUG: Adding link: {0}".format(json.dumps(new_link))) # print ("DEBUG: Adding section: {0}".format(json.dumps(new_section))) # print("DEBUG: Workbook so far: {0}".format(json.dumps(workbook))) workbook['items'][2]['content']['links'].append(new_link.copy()) # I am getting crazy with Python variable references :( # Add section to workbook new_new_section=json.loads(json.dumps(new_section.copy())) workbook['items'].append(new_new_section) if args.verbose: print("DEBUG: category dictionary generated: {0}".format(str(tab_dict))) # For each checklist item, add a query to the workbook for item in checklist_data.get("items"): # Read variables from JSON guid = item.get("guid") tab = item.get(tab_name_field) text = item.get("text") description = item.get("description") severity = item.get("severity") link = item.get("link") training = item.get("training") graph_query = fix_query_format(item.get("graph")) if graph_query and (tab in tab_title_list): if args.verbose: print("DEBUG: adding sections to workbook for ARG query '{0}', length of query is {1}".format(str(graph_query), str(len(str(graph_query))))) query_id += 1 # Create new text new_text = block_text.copy() new_text['name'] = 'querytext' + str(query_id) new_text['content']['json'] = text if link: new_text['content']['json'] += ". Check [this link](" + link + ") for further information." if training: new_text['content']['json'] += ". [This training](" + training + ") can help to educate yourself on this." 
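    # Each matching checklist item contributes a text step (built above) and a companion
    # Azure Resource Graph query step (built next); both get appended to the tab that
    # corresponds to the item's category (or subcategory when --category is used).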
# Create new query new_query = block_query.copy() new_query['name'] = 'query' + str(query_id) new_query['content']['query'] = graph_query new_query['content']['size'] = query_size # Add text and query to the workbook tab_id = tab_dict[tab] + len(block_workbook['items']) if args.verbose: print ("DEBUG: Adding text and query to tab ID {0} ({1})".format(str(tab_id), tab)) print ("DEBUG: Workbook object name is {0}".format(workbook['items'][tab_id]['name'])) new_new_text = json.loads(json.dumps(new_text.copy())) new_new_query = json.loads(json.dumps(new_query.copy())) workbook['items'][tab_id]['content']['items'].append(new_new_text) workbook['items'][tab_id]['content']['items'].append(new_new_query) # Dump the workbook to the output file or to console, if there was any query in the original checklist if query_id > 0: if output_file: # Dump workbook JSON into a file workbook_string = json.dumps(workbook, indent=4) with open(output_file, 'w', encoding='utf-8') as f: f.write(workbook_string) f.close() # Create ARM template (optionally, if specified in the parameters) if args.create_arm_template: arm_output_file = os.path.splitext(output_file)[0] + '_template.json' if args.verbose: print ("DEBUG: Creating ARM template in file {0}...".format(arm_output_file)) block_arm['parameters']['workbookDisplayName']['defaultValue'] = checklist_data['metadata']['name'] if args.category: block_arm['parameters']['workbookDisplayName']['defaultValue'] += ' - ' + args.category[0].upper() + args.category[1:] block_arm['resources'][0]['properties']['serializedData'] = serialize_data(workbook_string) arm_string = json.dumps(block_arm, indent=4) with open(arm_output_file, 'w', encoding='utf-8') as f: f.write(arm_string) f.close() else: print(workbook_string) else: print("INFO: sorry, the analyzed checklist did not contain any graph query") def get_output_file(checklist_file_or_url, is_file=True): if is_file: output_file = os.path.basename(checklist_file_or_url) else: output_file = checklist_file_or_url.split('/')[-1] if args.output_file: return args.output_file elif args.output_path: # Get filename without path and extension output_file = os.path.join(args.output_path, output_file) # If category specified, add to output file name if args.category: return os.path.splitext(output_file)[0] + '_' + str(args.category).lower() + '_workbook.json' else: return os.path.splitext(output_file)[0] + '_workbook.json' else: output_file = None ######## # Main # ######## # First thing of all, load the building blocks load_building_blocks() if args.verbose: print ("DEBUG: building blocks variables intialized:") print ("DEBUG: - Workbook: {0}".format(str(block_workbook))) print ("DEBUG: - Number of items: {0}".format(str(len(block_workbook['items'])))) print ("DEBUG: - Link: {0}".format(str(block_link))) print ("DEBUG: - Query: {0}".format(str(block_query))) # Download checklist or process from local file if checklist_file: checklist_file_list = checklist_file.split(" ") # Take only the English versions of the checklists (JSON files) checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list if (os.path.splitext(file)[1] == '.json')] # Remove duplicates checklist_file_list = list(set(checklist_file_list)) # Go over the list(s) for checklist_file in checklist_file_list: if args.verbose: print("DEBUG: Opening checklist file", checklist_file) # Get JSON try: # Open file with open(checklist_file) as f: checklist_data = json.load(f) # Set output file variable output_file = get_output_file(checklist_file, is_file=True) # 
Generate workbook generate_workbook(output_file, checklist_data) # If error, just continue except Exception as e: print("ERROR: Error when processing JSON file", checklist_file, "-", str(e)) # sys.exit(0) else: # If no input files specified, fetch the latest from Github... if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" if args.verbose: print("DEBUG: Downloading checklist file from", checklist_url) response = requests.get(checklist_url) # If download was successful if response.status_code == 200: if args.verbose: print ("DEBUG: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_data = json.loads(response.text) except Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Set output files output_file = get_output_file(checklist_url, is_file=False) # Generate workbook generate_workbook(output_file, checklist_data) scripts/update_excel_openpyxl.py METASEP ###################################################################### # # This script reads the checklist items from the latest checklist file # in Github (or from a local file) and populates an Excel spreadsheet # with the contents. # # Last updated: March 2022 # ###################################################################### import json import argparse import sys import os import requests import glob from openpyxl import load_workbook from openpyxl.worksheet.datavalidation import DataValidation # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--checklist-file', dest='checklist_file', action='store', help='You can optionally supply a JSON file containing the checklist you want to dump to the Excel spreadsheet. 
Otherwise it will take the latest file from Github') parser.add_argument('--only-english', dest='only_english', action='store_true', default=False, help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)') parser.add_argument('--find-all', dest='find_all', action='store_true', default=False, help='if checklist files are specified, find all the languages for the given checklists (default: False)') parser.add_argument('--technology', dest='technology', action='store', help='If you do not supply a JSON file with the checklist, you need to specify the technology from which the latest checklist will be downloaded from Github') parser.add_argument('--excel-file', dest='excel_file', action='store', help='You need to supply an Excel file where the checklist will be written') parser.add_argument('--output-excel-file', dest='output_excel_file', action='store', help='You can optionally supply an Excel file where the checklist will be saved, otherwise it will be updated in-place') parser.add_argument('--output-path', dest='output_path', action='store', help='If using --output-name-is-input-name, folder where to store the results') parser.add_argument('--output-name-is-input-name', dest='output_name_is_input_name', action='store_true', default=False, help='Save the output in a file with the same filename as the JSON input, but with xlsx extension') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() checklist_file = args.checklist_file excel_file = args.excel_file technology = args.technology # Constants worksheet_checklist_name = 'Checklist' row1 = 8 # First row after which the Excel spreadsheet will be updated col_checklist_name = "A" row_checklist_name = "4" guid_column_index = "L" comment_column_index = "G" sample_cell_index = 'A4' col_area = "A" col_subarea = "B" col_check = "C" col_desc = "D" col_sev = "E" col_status = "F" col_comment = "G" col_link = "H" col_training = "I" col_arg_success = "J" col_arg_failure = "K" col_guid = "L" info_link_text = 'More info' training_link_text = 'Training' worksheet_values_name = 'Values' values_row1 = 2 col_values_severity = "A" col_values_status = "B" col_values_area = "C" col_values_description = "H" # Main function def update_excel_file(input_excel_file, output_excel_file, checklist_data): # Load workbook try: wb = load_workbook(filename = input_excel_file) if args.verbose: print("DEBUG: workbook", input_excel_file, "opened successfully") except Exception as e: print("ERROR: Error when opening Excel file", input_excel_file, "-", str(e)) sys.exit(1) # Get worksheet try: ws = wb[worksheet_checklist_name] if args.verbose: print("DEBUG: worksheet", worksheet_checklist_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Set checklist name try: ws[col_checklist_name + row_checklist_name] = checklist_data["metadata"]["name"] if args.verbose: print("DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'".format(checklist_data["metadata"]["name"])) except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_checklist_name, "-", str(e)) sys.exit(1) # Get default status from the JSON, default to "Not verified" try: status_list = checklist_data.get("status") default_status = status_list[0].get("name") if args.verbose: print ("DEBUG: 
default status retrieved from checklist: '{0}'".format(default_status)) except: default_status = "Not verified" if args.verbose: print ("DEBUG: Using default status 'Not verified'") pass # For each checklist item, add a row to spreadsheet row_counter = row1 for item in checklist_data.get("items"): # Read variables from JSON guid = item.get("guid") category = item.get("category") subcategory = item.get("subcategory") text = item.get("text") description = item.get("description") severity = item.get("severity") link = item.get("link") training = item.get("training") status = default_status graph_query_success = item.get("graph_success") graph_query_failure = item.get("graph_failure") # Update Excel ws[col_area + str(row_counter)].value = category ws[col_subarea + str(row_counter)].value = subcategory ws[col_check + str(row_counter)].value = text ws[col_desc + str(row_counter)].value = description ws[col_sev + str(row_counter)].value = severity ws[col_status + str(row_counter)].value = status ws[col_link + str(row_counter)].value = link # if link != None: # link_elements = link.split('#') # link_address = link_elements[0] # if len(link_elements) > 1: # link_subaddress = link_elements[1] # else: # link_subaddress = "" # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip="", TextToDisplay=info_link_text) ws[col_training + str(row_counter)].value = training # if training != None: # training_elements = training.split('#') # training_address = training_elements[0] # if len(training_elements) > 1: # training_subaddress = training_elements[1] # else: # training_subaddress = "" # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip="", TextToDisplay=training_link_text) # GUID and ARG queries ws[col_arg_success + str(row_counter)].value = graph_query_success ws[col_arg_failure + str(row_counter)].value = graph_query_failure ws[col_guid + str(row_counter)].value = guid # Next row row_counter += 1 # Display summary if args.verbose: number_of_checks = row_counter - row1 print("DEBUG:", str(number_of_checks), "checks addedd to Excel spreadsheet") # Get worksheet try: wsv = wb[worksheet_values_name] if args.verbose: print("DEBUG: worksheet", worksheet_values_name, "selected successfully") except Exception as e: print("ERROR: Error when selecting worksheet", worksheet_values_name, "-", str(e)) sys.exit(1) # Update categories row_counter = values_row1 for item in checklist_data.get("categories"): area = item.get("name") wsv[col_values_area + str(row_counter)].value = area row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "categories addedd to Excel spreadsheet") # Update status row_counter = values_row1 for item in checklist_data.get("status"): status = item.get("name") description = item.get("description") wsv[col_values_status + str(row_counter)].value = status wsv[col_values_description + str(row_counter)].value = description row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "statuses addedd to Excel spreadsheet") # Update severities row_counter = values_row1 for item in checklist_data.get("severities"): severity = item.get("name") wsv[col_values_severity + str(row_counter)].value = severity row_counter += 1 # Display summary if args.verbose: print("DEBUG:", str(row_counter - values_row1), "severities addedd to Excel spreadsheet") # Data validation 
# dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True) dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True) rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks) if args.verbose: print("DEBUG: adding data validation to range", rangevar) dv.add(rangevar) ws.add_data_validation(dv) # Close book if args.verbose: print("DEBUG: saving workbook", output_excel_file) try: wb.save(output_excel_file) except Exception as e: print("ERROR: Error when saving Excel file to", output_excel_file, "-", str(e)) sys.exit(1) ######## # Main # ######## # Download checklist if checklist_file: checklist_file_list = checklist_file.split(" ") # If --only-english parameter was supplied, take only the English version and remove duplicates if args.only_english: checklist_file_list = [file[:-8] + '.en.json' for file in checklist_file_list] checklist_file_list = list(set(checklist_file_list)) if args.verbose: print("DEBUG: new checklist file list:", str(checklist_file_list)) # If --find-all paramater was supplied, find all the languages for the checklist if args.find_all: new_file_list = [] for checklist_file in checklist_file_list: filedir = os.path.dirname(checklist_file) filebase = os.path.basename(checklist_file) filebase_noext = filebase[:-8] # Remove '.en.json' file_match_list = glob.glob(os.path.join(filedir, filebase_noext + '.*.json')) for checklist_match in file_match_list: # new_file_list.append(os.path.join(filedir, checklist_match)) new_file_list.append(checklist_match) checklist_file_list = list(set(new_file_list)) if args.verbose: print("DEBUG: new checklist file list:", str(checklist_file_list)) # Go over the list for checklist_file in checklist_file_list: if args.verbose: print("DEBUG: Opening checklist file", checklist_file) # Get JSON try: with open(checklist_file) as f: checklist_data = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", checklist_file, "-", str(e)) sys.exit(0) # Set input and output files input_excel_file = excel_file if args.output_excel_file: output_excel_file = args.output_excel_file elif args.output_name_is_input_name: if args.output_path: # Get filename without path and extension output_excel_file = os.path.splitext(os.path.basename(checklist_file))[0] + '.xlsx' output_excel_file = os.path.join(args.output_path, output_excel_file) else: # Just change the extension output_excel_file = os.path.splitext(checklist_file)[0] + '.xlsx' # Update spreadsheet update_excel_file(input_excel_file, output_excel_file, checklist_data) else: if technology: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/" + technology + "_checklist.en.json" else: checklist_url = "https://raw.githubusercontent.com/Azure/review-checklists/main/checklists/lz_checklist.en.json" if args.verbose: print("DEBUG: Downloading checklist file from", checklist_url) response = requests.get(checklist_url) # If download was successful if response.status_code == 200: if args.verbose: print ("DEBUG: File {0} downloaded successfully".format(checklist_url)) try: # Deserialize JSON to object variable checklist_data = json.loads(response.text) except Exception as e: print("Error deserializing JSON content: {0}".format(str(e))) sys.exit(1) # Upload spreadsheet if args.output_excel_file: output_excel_file = args.output_excel_file else: output_excel_file = excel_file update_excel_file(excel_file, output_excel_file, checklist_data) scripts/translate.py METASEP 
import requests import os import argparse import sys import json import uuid # Variables translate_keys = ('description', 'name', 'category', 'subcategory', 'text', 'severity') translate_languages = ['es', 'ja', 'pt', 'ko'] # Get environment variables translator_endpoint = os.environ["AZURE_TRANSLATOR_ENDPOINT"] translator_region = os.environ["AZURE_TRANSLATOR_REGION"] translator_key = os.environ["AZURE_TRANSLATOR_SUBSCRIPTION_KEY"] translator_url = translator_endpoint + 'translate' # Get input arguments parser = argparse.ArgumentParser(description='Translate a JSON file') parser.add_argument('--input-file-name', dest='file_name_in', action='store', help='you need to supply file name where your JSON to be translated is located') parser.add_argument('--output-file-name', dest='file_name_out', action='store', help='you need to supply file name where the translated JSON will be saved') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() # Check we have all information if translator_endpoint and translator_region and translator_key: if args.verbose: print('DEBUG: environment variables retrieved successfully: {0}, {1}, {2}'.format(translator_endpoint, translator_region, translator_key)) else: print('ERROR: couldnt retrieve environment variables for translation') sys.exit(1) # Get JSON try: with open(args.file_name_in) as f: checklist = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", args.file_name_in, "-", str(e)) sys.exit(1) # Function to translate a single line of text to a single language def translate_text(text_to_translate, languages): if args.verbose: print('DEBUG: translating text "{0}" on {1}...'.format(text_to_translate, translator_url)) # If a single languages specified, convert to array if not type(languages) == list: languages = [languages] # Azure Translator parameters translator_params = { 'api-version': '3.0', 'from': 'en', 'to': languages } translator_headers = { 'Ocp-Apim-Subscription-Key': translator_key, 'Ocp-Apim-Subscription-Region': translator_region, 'Content-type': 'application/json', 'Accept': 'application/json', 'X-ClientTraceId': str(uuid.uuid4()) } translator_body = [{ 'text': text_to_translate }] if args.verbose: print ("DEBUG: sending body", str(translator_body)) print ("DEBUG: sending HTTP headers", str(translator_headers)) print ("DEBUG: sending parameters", str(translator_params)) try: request = requests.post(translator_url, params=translator_params, headers=translator_headers, json=translator_body) response = request.json() if args.verbose: print("DEBUG: translator response:") print(json.dumps(response, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))) return str(response[0]['translations'][0]['text']) except Exception as e: print("ERROR: Error in translation:", str(e)) # Go over all keys and translate them if required def translate_object(checklist_object, language): translated_object = checklist_object.copy() for (k, v) in translated_object.items(): if isinstance(v, list): translated_items = [] for list_item in v: translated_items.append(translate_object(list_item, language)) translated_object[k] = translated_items else: if k in translate_keys: # print("Found key", k, "and scalar value", v) translated_object[k] = translate_text(v, language) return translated_object ################ # Main # ################ if args.verbose: print("DEBUG: Starting translations for languages", str(translate_languages)) 
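# Illustrative note (assumed behavior, based on translate_keys and translate_object above):
# translate_object() walks the checklist recursively and only rewrites the keys listed in
# translate_keys. For a toy checklist such as
#     {"items": [{"guid": "1234", "text": "Use managed identities", "severity": "High"}]}
# a run for 'es' would leave "guid" untouched and replace the values of "text" and
# "severity" with the Spanish translations returned by translate_text().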
for using_language in translate_languages:
    print("INFO: Starting translation to", using_language)
    translated_checklist = translate_object(checklist, using_language)
    # If no output file was specified, use the input file name and append the language as extension before .json
    if not args.file_name_out:
        file_name_in_base = os.path.basename(args.file_name_in)
        file_name_in_dir = os.path.dirname(args.file_name_in)
        file_name_in_noext = file_name_in_base.split('.')[0]
        file_name_out = file_name_in_noext + '.' + using_language + '.json'
        file_name_out = os.path.join(file_name_in_dir, file_name_out)
    else:
        file_name_out = args.file_name_out
    print("INFO: saving output file to", file_name_out)
    translated_checklist_string = json.dumps(translated_checklist, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))
    with open(file_name_out, 'w', encoding='utf-8') as f:
        f.write(translated_checklist_string)
    # print(json.dumps(translated_checklist, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': ')))

scripts/timestamp_checklist.py METASEP
#################################################################################
#
# This script updates the timestamp of a specific checklist and saves it.
#
# Last updated: January 2023
#
#################################################################################

import json
import argparse
import sys
import requests
import datetime

# Get input arguments
parser = argparse.ArgumentParser(description='Timestamp an Azure Review Checklist')
parser.add_argument('--input-file', dest='input_file', action='store',
                    help='You need to supply the name of the JSON file with the checklist to be timestamped')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of a new JSON file that will be used to save the output. Otherwise the timestamped checklist will replace the input one')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False,
                    help='do not save anything, only output to console (default: False)')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()

if not args.input_file:
    print("ERROR: no input file specified, not doing anything")
    sys.exit(1)

# Load the checklist
try:
    with open(args.input_file) as f:
        checklist = json.load(f)
except Exception as e:
    print("ERROR: Error when processing JSON file, nothing changed", args.input_file, "-", str(e))
    sys.exit(1)

# Overwrite the timestamp
checklist['metadata']['timestamp'] = datetime.date.today().strftime("%B %d, %Y")

# If dry-run, show on screen
if args.dry_run:
    print(json.dumps(checklist, indent=4))

# Saving output file if specified in the argument
if not args.dry_run:
    if args.output_file:
        output_file = args.output_file
    else:
        output_file = args.input_file
    if args.verbose:
        print("DEBUG: saving output file to", output_file)
    checklist_string = json.dumps(checklist, indent=4)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(checklist_string)

scripts/sort_checklist.py METASEP
#################################################################################
#
# This script sorts a specific checklist and saves it.
#
# Last updated: January 2023
#
#################################################################################

import json
import argparse
import sys
import requests

# Get input arguments
parser = argparse.ArgumentParser(description='Sort an Azure Review Checklist per category and subcategory')
parser.add_argument('--input-file', dest='input_file', action='store',
                    help='You need to supply the name of the JSON file with the checklist to be sorted')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of a new JSON file that will be used to save the output. Otherwise the sorted checklist will replace the unsorted one')
parser.add_argument('--dry-run', dest='dry_run', action='store_true', default=False,
                    help='do not save anything, only output to console (default: False)')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()

if not args.input_file:
    print("ERROR: no input file specified, not doing anything")
    sys.exit(1)

# Load the checklist
try:
    with open(args.input_file) as f:
        checklist = json.load(f)
except Exception as e:
    print("ERROR: Error when processing JSON file, nothing changed", args.input_file, "-", str(e))
    sys.exit(1)

# Sort the items per category and subcategory
items = checklist['items']
items = sorted(items, key=lambda k: (k['category'], k['subcategory']))
checklist['items'] = items

# If dry-run, show on screen
if args.dry_run:
    print(json.dumps(checklist, indent=4))

# Saving output file if specified in the argument
if not args.dry_run:
    if args.output_file:
        output_file = args.output_file
    else:
        output_file = args.input_file
    if args.verbose:
        print("DEBUG: saving output file to", output_file)
    checklist_string = json.dumps(checklist, indent=4)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(checklist_string)

scripts/compile_checklist.py METASEP
#################################################################################
#
# This script attempts to build a unified checklist out of all the different checklists
# stored in this repo, and optionally filter it per design area.
#
# Last updated: June 2022
#
#################################################################################

import json
import argparse
import sys
import requests

# Get input arguments
parser = argparse.ArgumentParser(description='Compile all the Azure Review Checklists into a single combined checklist')
parser.add_argument('--output-file', dest='output_file', action='store',
                    help='You can optionally supply the name of the JSON file that will be created. Otherwise no output will be generated')
parser.add_argument('--category', dest='category_filter', action='store',
                    help='You can optionally provide a category name as a filter')
parser.add_argument('--checklist-name', dest='new_checklist_name', action='store', default='Combined checklist',
                    help='You can optionally provide a name for the combined checklist (default: Combined checklist)')
parser.add_argument('--print-categories', dest='print_categories', action='store_true', default=False,
                    help='print the categories of the combined checklist (default: False)')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                    help='run in verbose mode (default: False)')
args = parser.parse_args()

if args.category_filter:
    category_filter = args.category_filter.lower()

# Variables
repo_contents_url = 'https://api.github.com/repos/azure/review-checklists/contents/checklists'

# Get existing checklists in the repo
response = requests.get(repo_contents_url)
# If download was successful
if response.status_code == 200:
    if args.verbose:
        print("DEBUG: Github contents downloaded successfully from {0}".format(repo_contents_url))
    try:
        content_data = json.loads(response.text)
    except Exception as e:
        print("Error deserializing JSON content: {0}".format(str(e)))
        sys.exit(1)
    # Get the list of checklist files
    checklist_urls = []
    if content_data:
        for github_object in content_data:
            if github_object['name'][-7:] == 'en.json':
                checklist_urls.append(github_object['download_url'])
    else:
        print("Error: no contents retrieved from the GitHub repository at {0}".format(repo_contents_url))
        sys.exit(1)

if args.verbose:
    print("DEBUG: {0} checklists found".format(str(len(checklist_urls))))

# Load all of the items in memory
new_checklist = {
    'items': [],
    'status': [
        {'name': 'Not verified', 'description': 'This check has not been looked at yet'},
        {'name': 'Open', 'description': 'There is an action item associated to this check'},
        {'name': 'Fulfilled', 'description': 'This check has been verified, and there are no further action items associated to it'},
        {'name': 'Not required', 'description': 'Recommendation understood, but not needed by current requirements'},
        {'name': 'N/A', 'description': 'Not applicable for current design'}
    ],
    'severities': [
        {'name': 'High'},
        {'name': 'Medium'},
        {'name': 'Low'}
    ],
    'categories': [],
    'metadata': {
        'name': args.new_checklist_name
    }
}
for checklist_url in checklist_urls:
    if args.verbose:
        print("DEBUG: Downloading checklist file from", checklist_url)
    response = requests.get(checklist_url)
    if response.status_code == 200:
        if args.verbose:
            print("DEBUG: File {0} downloaded successfully".format(checklist_url))
        try:
            # Deserialize JSON to object variable
            checklist_data = json.loads(response.text)
            checklist_name = checklist_data['metadata']['name']
            for item in checklist_data['items']:
                if checklist_name:
                    item['checklist'] = checklist_name
                item_category = str(item['category']).lower()
                if not args.category_filter or category_filter in item_category:
                    new_checklist['items'].append(item)
        except Exception as e:
            print("Error deserializing JSON content: {0}".format(str(e)))
            sys.exit(1)

if args.verbose:
    print("DEBUG: Resulting combined checklist has {0} items".format(str(len(new_checklist['items']))))

# Add the categories to the new checklist
categories = []
for item in new_checklist['items']:
    category_name = item['checklist'] + '/' + item['category']
    if not category_name in categories:
        categories.append(category_name)
if args.verbose:
    print("DEBUG: {0} categories found".format(str(len(categories))))
for category in
categories: new_checklist['categories'].append({'name': category}) if args.print_categories: print(category) # Saving output file if specified in the argument if args.output_file: if args.verbose: print("DEBUG: saving output file to", args.output_file) new_checklist_string = json.dumps(new_checklist) with open(args.output_file, 'w', encoding='utf-8') as f: f.write(new_checklist_string) f.close() scripts/checklist_graph_update.py METASEP ################################################################################# # # This is a study on two libraries to update Excel files: openpyxl and xlwings # This exercise has shown that openpyxl breaks the xlsx files in this repo (maybe # because of the macros, or the formulae), while xlwings works fine. # # This script reads a previously generated JSON file with the results of Azure # Resource Graph queries, and stores them in the 'Comments' column of a # spreadsheet. Both the JSON file and the spreadsheet file are supplied as # parameters. # # Last updated: March 2022 # ################################################################################# import json import argparse import sys from pandas import DataFrame from openpyxl import load_workbook import xlwings as xw # Get input arguments parser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results') parser.add_argument('--graph-file', dest='graph_file', action='store', help='You need to supply a JSON file containing the results of Azure Resource Graph Queries') parser.add_argument('--excel-file', dest='excel_file', action='store', help='You need to supply an Excel file where the query results will be stored') parser.add_argument('--mode', dest='mode', action='store', default="openpyxl", help='It can be either xlwings or openpyxl (default is openpyxl)') parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='run in verbose mode (default: False)') args = parser.parse_args() graph_file = args.graph_file excel_file = args.excel_file mode = args.mode # Constants guid_column_index = "K" comment_column_index = "G" sample_cell_index = 'A4' # Get JSON try: with open(graph_file) as f: graph_data = json.load(f) except Exception as e: print("ERROR: Error when processing JSON file", graph_file, "-", str(e)) sys.exit(1) # Load workbook try: if mode == 'openpyxl': if args.verbose: print("DEBUG: working with openpyxl library") wb = load_workbook(filename = excel_file) ws = wb['Checklist'] elif mode == 'xlwings': if args.verbose: print("DEBUG: working with xlwings library") wb = xw.Book(excel_file) ws = wb.sheets['Checklist'] else: print("ERROR: mode {0} not recognized".format(mode)) except Exception as e: print("ERROR: Error when opening Excel file", excel_file, "-", str(e)) sys.exit(1) # Print specific cell if args.verbose: print("DEBUG: looking at spreadsheet for", ws[sample_cell_index].value) # Get GUID column into a list if mode == 'openpyxl': guid_col = ws[guid_column_index] guid_col_values = [x.value for x in guid_col] if args.verbose: print("DEBUG: GUID column retrieved with", str(len(guid_col_values)), "values") elif mode == 'xlwings': guid_col_values = ws.range(guid_column_index + ":" + guid_column_index).value if args.verbose: print("DEBUG: GUID column retrieved with", str(len(guid_col_values)), "values") else: print("ERROR: mode {0} not recognized".format(mode)) sys.exit(1) # Go over all checks in the JSON file for check in graph_data['checks']: guid = check['guid'] arm_id = check['id'] compliant = 
check['compliant']
    if (compliant == "false"):
        comment = "Non-compliant: {0}\n".format(arm_id)
    elif (compliant == "true"):
        comment = "Compliant: {0}\n".format(arm_id)
    else:
        print("ERROR: compliant status {0} not recognized".format(compliant))
    # Find the guid in the list
    if guid in guid_col_values:
        # The retrieved column list is zero-based while worksheet rows start at 1, so add 1
        row = guid_col_values.index(guid) + 1
        cell_index = comment_column_index + str(row)
        print("DEBUG: updating cell", cell_index)
        if mode == 'openpyxl':
            ws[cell_index] = comment
        elif mode == 'xlwings':
            ws.range(cell_index).value = comment
    else:
        print("ERROR: could not find GUID {0} in the Excel list".format(guid))

# Saving file
if mode == 'openpyxl':
    print("DEBUG: saving workbook", excel_file)
    try:
        wb.save(excel_file)
    except Exception as e:
        print("ERROR: Error when saving Excel file", excel_file, "-", str(e))
        sys.exit(1)
elif mode == 'xlwings':
    print("DEBUG: saving workbook", excel_file)
    try:
        wb.save()
    except Exception as e:
        print("ERROR: Error when saving Excel file", excel_file, "-", str(e))
        sys.exit(1)
else:
    print("ERROR: mode {0} not recognized".format(mode))

scripts/create_master_checklist.py METASEP
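# Illustrative sketch (not one of the repo scripts): shows why a +1 offset is needed when
# mapping a value's position in an openpyxl column slice to a worksheet row, as done in
# scripts/checklist_graph_update.py above. Column letter and GUID values are arbitrary examples.
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
for r in range(1, 4):
    ws.cell(row=r, column=11, value="guid-{0}".format(r))  # column K holds sample GUIDs
guid_col_values = [c.value for c in ws['K']]                # index 0 corresponds to row 1
row = guid_col_values.index("guid-2") + 1                   # +1 converts list index to row number
assert ws['K' + str(row)].value == "guid-2"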
[ { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n 
checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = 
item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r\n graph_query_success = item.get(\"graph_success\")\r\n graph_query_failure = item.get(\"graph_failure\")\r\n # Update Excel\r\n ws[col_checklist + str(row_counter)].value = checklist_name\r\n ws[col_area + str(row_counter)].value = category\r\n ws[col_subarea + str(row_counter)].value = subcategory\r\n ws[col_check + str(row_counter)].value = text\r\n ws[col_desc + str(row_counter)].value = description\r\n ws[col_sev + str(row_counter)].value = severity\r\n ws[col_status + str(row_counter)].value = status\r\n ws[col_link + str(row_counter)].value = link\r\n # if link != None:\r\n # link_elements = link.split('#')\r\n # link_address = link_elements[0]\r\n # if len(link_elements) > 1:\r\n # link_subaddress = link_elements[1]\r\n # else:\r\n # link_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip=\"\", TextToDisplay=info_link_text)\r\n ws[col_training + str(row_counter)].value = training\r\n # if training != None:\r\n # training_elements = training.split('#')\r\n # training_address = training_elements[0]\r\n # if len(training_elements) > 1:\r\n # training_subaddress = training_elements[1]\r\n # else:\r\n # training_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip=\"\", TextToDisplay=training_link_text)\r\n # GUID and ARG queries\r\n ws[col_arg_success + str(row_counter)].value = graph_query_success\r\n ws[col_arg_failure + str(row_counter)].value = graph_query_failure\r\n ws[col_guid + str(row_counter)].value = guid\r\n # Next row\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n number_of_checks = row_counter - row1\r\n print(\"DEBUG:\", str(number_of_checks), \"checks addedd to Excel spreadsheet\")\r\n\r\n # Get worksheet\r\n try:\r\n wsv = wb[worksheet_values_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_values_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_values_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Update status\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"status\"):\r\n status = item.get(\"name\")\r\n description = item.get(\"description\")\r\n wsv[col_values_status + str(row_counter)].value = status\r\n wsv[col_values_description + str(row_counter)].value = description\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"statuses addedd to Excel spreadsheet\")\r\n\r\n # Update severities\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"severities\"):\r\n severity = item.get(\"name\")\r\n wsv[col_values_severity + str(row_counter)].value = severity\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"severities addedd to Excel spreadsheet\")\r\n\r\n # Data validation\r\n # dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True)\r\n dv = DataValidation(type=\"list\", 
formula1='=Values!$B$2:$B$6', allow_blank=True)\r\n rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks)\r\n if args.verbose:\r\n print(\"DEBUG: adding data validation to range\", rangevar)\r\n dv.add(rangevar)\r\n ws.add_data_validation(dv)\r\n\r\n # Close book\r\n if args.verbose:\r\n print(\"DEBUG: saving workbook\", output_excel_file)\r\n try:\r\n wb.save(output_excel_file)\r\n except Exception as e:\r\n print(\"ERROR: Error when saving Excel file to\", output_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n########\r\n# Main #\r\n########\r\n\r\n# Download checklist\r\nif args.input_folder:\r\n # Get consolidated checklist\r\n checklist_master_data = get_consolidated_checklist(args.input_folder, args.language)\r\n # Set output file variables\r\n xlsx_output_file = os.path.join(args.xlsx_output_folder, args.output_name + \".xlsx\")\r\n json_output_file = os.path.join(args.json_output_folder, args.output_name + \".json\")\r\n # Dump master checklist to JSON file\r", "type": "infile" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 
'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n 
ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r\n graph_query_success = item.get(\"graph_success\")\r\n graph_query_failure = item.get(\"graph_failure\")\r\n # Update Excel\r\n ws[col_checklist + str(row_counter)].value = checklist_name\r\n ws[col_area + str(row_counter)].value = category\r\n ws[col_subarea + str(row_counter)].value = subcategory\r\n ws[col_check + str(row_counter)].value = text\r\n ws[col_desc + str(row_counter)].value = description\r\n ws[col_sev + str(row_counter)].value = severity\r\n ws[col_status + str(row_counter)].value = status\r\n ws[col_link + str(row_counter)].value = link\r\n # if link != None:\r\n # link_elements = link.split('#')\r\n # link_address = link_elements[0]\r\n # if len(link_elements) > 1:\r\n # link_subaddress = link_elements[1]\r\n # else:\r\n # link_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip=\"\", TextToDisplay=info_link_text)\r\n ws[col_training + str(row_counter)].value = training\r\n # if training != None:\r\n # training_elements = training.split('#')\r\n # training_address = training_elements[0]\r\n # if len(training_elements) > 1:\r\n # training_subaddress = training_elements[1]\r\n # else:\r\n # training_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip=\"\", TextToDisplay=training_link_text)\r\n # GUID and ARG queries\r\n ws[col_arg_success + str(row_counter)].value = graph_query_success\r\n ws[col_arg_failure + str(row_counter)].value = graph_query_failure\r\n ws[col_guid + str(row_counter)].value = guid\r\n # Next row\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n number_of_checks = row_counter - row1\r\n print(\"DEBUG:\", str(number_of_checks), \"checks addedd to Excel spreadsheet\")\r\n\r\n # Get worksheet\r\n try:\r\n wsv = wb[worksheet_values_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_values_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_values_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Update status\r\n 
row_counter = values_row1\r\n for item in checklist_data.get(\"status\"):\r\n status = item.get(\"name\")\r\n description = item.get(\"description\")\r\n wsv[col_values_status + str(row_counter)].value = status\r\n wsv[col_values_description + str(row_counter)].value = description\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"statuses addedd to Excel spreadsheet\")\r\n\r\n # Update severities\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"severities\"):\r\n severity = item.get(\"name\")\r\n wsv[col_values_severity + str(row_counter)].value = severity\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"severities addedd to Excel spreadsheet\")\r\n\r\n # Data validation\r\n # dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True)\r\n dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True)\r\n rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks)\r\n if args.verbose:\r\n print(\"DEBUG: adding data validation to range\", rangevar)\r\n dv.add(rangevar)\r\n ws.add_data_validation(dv)\r\n\r\n # Close book\r\n if args.verbose:\r\n print(\"DEBUG: saving workbook\", output_excel_file)\r\n try:\r\n wb.save(output_excel_file)\r\n except Exception as e:\r\n print(\"ERROR: Error when saving Excel file to\", output_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n########\r\n# Main #\r\n########\r\n\r\n# Download checklist\r\nif args.input_folder:\r\n # Get consolidated checklist\r\n checklist_master_data = get_consolidated_checklist(args.input_folder, args.language)\r\n # Set output file variables\r\n xlsx_output_file = os.path.join(args.xlsx_output_folder, args.output_name + \".xlsx\")\r\n json_output_file = os.path.join(args.json_output_folder, args.output_name + \".json\")\r\n # Dump master checklist to JSON file\r\n dump_json_file(checklist_master_data, json_output_file)\r\n # Update spreadsheet\r", "type": "infile" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the 
English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n 
col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r\n graph_query_success = item.get(\"graph_success\")\r\n graph_query_failure = item.get(\"graph_failure\")\r\n # Update Excel\r\n ws[col_checklist + str(row_counter)].value = checklist_name\r\n ws[col_area + str(row_counter)].value = category\r\n ws[col_subarea + str(row_counter)].value = subcategory\r\n ws[col_check + str(row_counter)].value = text\r\n ws[col_desc + str(row_counter)].value = description\r\n ws[col_sev + str(row_counter)].value = severity\r\n ws[col_status + str(row_counter)].value = status\r\n ws[col_link + str(row_counter)].value = link\r\n # if link != None:\r\n # link_elements = link.split('#')\r\n # link_address = link_elements[0]\r\n # if len(link_elements) > 1:\r\n # link_subaddress = link_elements[1]\r\n # else:\r\n # link_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip=\"\", TextToDisplay=info_link_text)\r\n ws[col_training + str(row_counter)].value = training\r\n # if training != None:\r\n # training_elements = training.split('#')\r\n # training_address = training_elements[0]\r\n # 
if len(training_elements) > 1:\r\n # training_subaddress = training_elements[1]\r\n # else:\r\n # training_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip=\"\", TextToDisplay=training_link_text)\r\n # GUID and ARG queries\r\n ws[col_arg_success + str(row_counter)].value = graph_query_success\r\n ws[col_arg_failure + str(row_counter)].value = graph_query_failure\r\n ws[col_guid + str(row_counter)].value = guid\r\n # Next row\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n number_of_checks = row_counter - row1\r\n print(\"DEBUG:\", str(number_of_checks), \"checks addedd to Excel spreadsheet\")\r\n\r\n # Get worksheet\r\n try:\r\n wsv = wb[worksheet_values_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_values_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_values_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Update status\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"status\"):\r\n status = item.get(\"name\")\r\n description = item.get(\"description\")\r\n wsv[col_values_status + str(row_counter)].value = status\r\n wsv[col_values_description + str(row_counter)].value = description\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"statuses addedd to Excel spreadsheet\")\r\n\r\n # Update severities\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"severities\"):\r\n severity = item.get(\"name\")\r\n wsv[col_values_severity + str(row_counter)].value = severity\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"severities addedd to Excel spreadsheet\")\r\n\r\n # Data validation\r\n # dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True)\r\n dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True)\r\n rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks)\r\n if args.verbose:\r\n print(\"DEBUG: adding data validation to range\", rangevar)\r\n dv.add(rangevar)\r\n ws.add_data_validation(dv)\r\n\r\n # Close book\r\n if args.verbose:\r\n print(\"DEBUG: saving workbook\", output_excel_file)\r\n try:\r\n wb.save(output_excel_file)\r\n except Exception as e:\r\n print(\"ERROR: Error when saving Excel file to\", output_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n########\r\n# Main #\r\n########\r\n\r\n# Download checklist\r\nif args.input_folder:\r\n # Get consolidated checklist\r", "type": "infile" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport 
requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, 
separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r\n graph_query_success = item.get(\"graph_success\")\r\n graph_query_failure = item.get(\"graph_failure\")\r\n # Update Excel\r\n ws[col_checklist + str(row_counter)].value = checklist_name\r\n ws[col_area + str(row_counter)].value = category\r\n ws[col_subarea + str(row_counter)].value = subcategory\r\n ws[col_check + str(row_counter)].value = text\r\n ws[col_desc + str(row_counter)].value = description\r\n ws[col_sev + str(row_counter)].value = severity\r\n ws[col_status + 
str(row_counter)].value = status\r\n ws[col_link + str(row_counter)].value = link\r\n # if link != None:\r\n # link_elements = link.split('#')\r\n # link_address = link_elements[0]\r\n # if len(link_elements) > 1:\r\n # link_subaddress = link_elements[1]\r\n # else:\r\n # link_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip=\"\", TextToDisplay=info_link_text)\r\n ws[col_training + str(row_counter)].value = training\r\n # if training != None:\r\n # training_elements = training.split('#')\r\n # training_address = training_elements[0]\r\n # if len(training_elements) > 1:\r\n # training_subaddress = training_elements[1]\r\n # else:\r\n # training_subaddress = \"\"\r\n # ws.api.Hyperlinks.Add (Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip=\"\", TextToDisplay=training_link_text)\r\n # GUID and ARG queries\r\n ws[col_arg_success + str(row_counter)].value = graph_query_success\r\n ws[col_arg_failure + str(row_counter)].value = graph_query_failure\r\n ws[col_guid + str(row_counter)].value = guid\r\n # Next row\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n number_of_checks = row_counter - row1\r\n print(\"DEBUG:\", str(number_of_checks), \"checks addedd to Excel spreadsheet\")\r\n\r\n # Get worksheet\r\n try:\r\n wsv = wb[worksheet_values_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_values_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_values_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Update status\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"status\"):\r\n status = item.get(\"name\")\r\n description = item.get(\"description\")\r\n wsv[col_values_status + str(row_counter)].value = status\r\n wsv[col_values_description + str(row_counter)].value = description\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"statuses addedd to Excel spreadsheet\")\r\n\r\n # Update severities\r\n row_counter = values_row1\r\n for item in checklist_data.get(\"severities\"):\r\n severity = item.get(\"name\")\r\n wsv[col_values_severity + str(row_counter)].value = severity\r\n row_counter += 1\r\n\r\n # Display summary\r\n if args.verbose:\r\n print(\"DEBUG:\", str(row_counter - values_row1), \"severities addedd to Excel spreadsheet\")\r\n\r\n # Data validation\r\n # dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True)\r\n dv = DataValidation(type=\"list\", formula1='=Values!$B$2:$B$6', allow_blank=True)\r\n rangevar = col_status + str(row1) +':' + col_status + str(row1 + number_of_checks)\r\n if args.verbose:\r\n print(\"DEBUG: adding data validation to range\", rangevar)\r\n dv.add(rangevar)\r\n ws.add_data_validation(dv)\r\n\r\n # Close book\r\n if args.verbose:\r\n print(\"DEBUG: saving workbook\", output_excel_file)\r\n try:\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# 
--excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, 
\"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import 
DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main 
function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a 
spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n 
col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', 
dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n 
col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', 
action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n 
col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', 
dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n 
col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version 
(default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = 
\"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, 
ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n 
col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are 
stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = 
\"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure 
Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n 
row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import 
load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', 
encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# 
\r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef 
dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# 
--input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n 
print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r", 
"type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n 
checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n checklist_name = 
item.get(\"checklist\")\r\n guid = item.get(\"guid\")\r\n category = item.get(\"category\")\r\n subcategory = item.get(\"subcategory\")\r\n text = item.get(\"text\")\r\n description = item.get(\"description\")\r\n severity = item.get(\"severity\")\r\n link = item.get(\"link\")\r\n training = item.get(\"training\")\r\n status = default_status\r\n graph_query_success = item.get(\"graph_success\")\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with 
open(checklist_file) as f:\r", "type": "common" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the 
master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r\n ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r\n if args.verbose:\r\n print(\"DEBUG: starting filling the Excel spreadsheet with the values of checklist '{0}'\".format(checklist_data[\"metadata\"][\"name\"]))\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get default status from the JSON, default to \"Not verified\"\r\n try:\r\n status_list = checklist_data.get(\"status\")\r\n default_status = status_list[0].get(\"name\")\r\n if args.verbose:\r\n print (\"DEBUG: default status retrieved from checklist: '{0}'\".format(default_status))\r\n except:\r\n default_status = \"Not verified\"\r\n if args.verbose:\r\n print (\"DEBUG: Using default status 'Not verified'\")\r\n pass\r\n\r\n # For each checklist item, add a row to spreadsheet\r\n row_counter = row1\r\n for item in checklist_data.get(\"items\"):\r\n # Read variables from JSON\r\n 
        checklist_name = item.get("checklist")
        guid = item.get("guid")
        category = item.get("category")
        subcategory = item.get("subcategory")
        text = item.get("text")
        description = item.get("description")
        severity = item.get("severity")
        link = item.get("link")
        training = item.get("training")
        status = default_status
        graph_query_success = item.get("graph_success")
        graph_query_failure = item.get("graph_failure")
        # Update Excel
        ws[col_checklist + str(row_counter)].value = checklist_name
        ws[col_area + str(row_counter)].value = category
        ws[col_subarea + str(row_counter)].value = subcategory
        ws[col_check + str(row_counter)].value = text
        ws[col_desc + str(row_counter)].value = description
        ws[col_sev + str(row_counter)].value = severity
        ws[col_status + str(row_counter)].value = status
        ws[col_link + str(row_counter)].value = link
        # if link != None:
        #     link_elements = link.split('#')
        #     link_address = link_elements[0]
        #     if len(link_elements) > 1:
        #         link_subaddress = link_elements[1]
        #     else:
        #         link_subaddress = ""
        #     ws.api.Hyperlinks.Add(Anchor=ws[col_link + str(row_counter)].api, Address=link_address, SubAddress=link_subaddress, ScreenTip="", TextToDisplay=info_link_text)
        ws[col_training + str(row_counter)].value = training
        # if training != None:
        #     training_elements = training.split('#')
        #     training_address = training_elements[0]
        #     if len(training_elements) > 1:
        #         training_subaddress = training_elements[1]
        #     else:
        #         training_subaddress = ""
        #     ws.api.Hyperlinks.Add(Anchor=ws[col_training + str(row_counter)].api, Address=training_address, SubAddress=training_subaddress, ScreenTip="", TextToDisplay=training_link_text)
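        # The two commented-out blocks above rely on a COM-style `.api` handle
        # (Excel automation); openpyxl worksheets and cells do not expose that
        # attribute. If hyperlinks were wanted here, one openpyxl-native option is
        # the cell's own `hyperlink` attribute. This is only a sketch, left
        # commented out so the script's behaviour is unchanged; it is not part of
        # the original code, and the local names are illustrative.
        # if link:
        #     link_cell = ws[col_link + str(row_counter)]
        #     link_cell.value = info_link_text
        #     link_cell.hyperlink = link
        #     link_cell.style = "Hyperlink"
        # if training:
        #     training_cell = ws[col_training + str(row_counter)]
        #     training_cell.value = training_link_text
        #     training_cell.hyperlink = training
        #     training_cell.style = "Hyperlink"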
        # GUID and ARG queries
        ws[col_arg_success + str(row_counter)].value = graph_query_success
        ws[col_arg_failure + str(row_counter)].value = graph_query_failure
        ws[col_guid + str(row_counter)].value = guid
        # Next row
        row_counter += 1
    # Computed outside the verbose check so the data-validation range below works
    # even when --verbose is not set
    number_of_checks = row_counter - row1

    # Display summary
    if args.verbose:
        print("DEBUG:", str(number_of_checks), "checks added to Excel spreadsheet")

    # Get worksheet
    try:
        wsv = wb[worksheet_values_name]
        if args.verbose:
            print("DEBUG: worksheet", worksheet_values_name, "selected successfully")
    except Exception as e:
        print("ERROR: Error when selecting worksheet", worksheet_values_name, "-", str(e))
        sys.exit(1)

    # Update status
    row_counter = values_row1
    for item in checklist_data.get("status"):
        status = item.get("name")
        description = item.get("description")
        wsv[col_values_status + str(row_counter)].value = status
        wsv[col_values_description + str(row_counter)].value = description
        row_counter += 1

    # Display summary
    if args.verbose:
        print("DEBUG:", str(row_counter - values_row1), "statuses added to Excel spreadsheet")

    # Update severities
    row_counter = values_row1
    for item in checklist_data.get("severities"):
        severity = item.get("name")
        wsv[col_values_severity + str(row_counter)].value = severity
        row_counter += 1

    # Display summary
    if args.verbose:
        print("DEBUG:", str(row_counter - values_row1), "severities added to Excel spreadsheet")

    # Data validation: restrict the Status column to the list in the Values worksheet
    # dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True, showDropDown=True)
    dv = DataValidation(type="list", formula1='=Values!$B$2:$B$6', allow_blank=True)
    rangevar = col_status + str(row1) + ':' + col_status + str(row1 + number_of_checks)
    if args.verbose:
        print("DEBUG: adding data validation to range", rangevar)
    dv.add(rangevar)
    ws.add_data_validation(dv)

    # Close book
checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r", "type": "random" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 
'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r", "type": "random" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', 
action='store',\r\n help='Input folder where the checklists to merge are stored')\r", "type": "random" }, { "content": "######################################################################\r\n#\r\n# This script combines all of the existing checklists into one big\r\n# checklist, and saves it in JSON and XLSX (macrofree) formats.\r\n#\r\n# Example usage:\r\n# python3 ./scripts/create_master_checklist.py \\\r\n# --input-folder=\"./checklists\" \\\r\n# --language=\"en\" \\\r\n# --excel-file=\"./spreadsheet/macrofree/review_checklist_master_empty.xlsx\" \\\r\n# --output-name=\"checklist.en.master\" \\\r\n# --json-output-folder=\"./checklists/\" \\\r\n# --xlsx-output-folder=\"./spreadsheet/macrofree/\"\r\n# \r\n# Last updated: March 2022\r\n#\r\n######################################################################\r\n\r\nimport json\r\nimport argparse\r\nimport sys\r\nimport os\r\nimport requests\r\nimport glob\r\nimport datetime\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.worksheet.datavalidation import DataValidation\r\n\r\n# Get input arguments\r\nparser = argparse.ArgumentParser(description='Update a checklist spreadsheet with JSON-formatted Azure Resource Graph results')\r\nparser.add_argument('--input-folder', dest='input_folder', action='store',\r\n help='Input folder where the checklists to merge are stored')\r\nparser.add_argument('--language', dest='language', action='store', default='en',\r\n help='if checklist files are specified, ignore the non-English ones and only generate a spreadsheet for the English version (default: False)')\r\nparser.add_argument('--excel-file', dest='excel_file', action='store',\r\n help='You need to supply an Excel file that will be taken as template to create the XLSX file with the checklist')\r\nparser.add_argument('--json-output-folder', dest='json_output_folder', action='store',\r\n help='Folder where to store the JSON output')\r\nparser.add_argument('--xlsx-output-folder', dest='xlsx_output_folder', action='store',\r\n help='Folder where to store the macro free Excel output')\r\nparser.add_argument('--output-name', dest='output_name', action='store',\r\n help='File name (without extension) for the output files (.json and .xlsx extensions will be added automatically)')\r\nparser.add_argument('--verbose', dest='verbose', action='store_true',\r\n default=False,\r\n help='run in verbose mode (default: False)')\r\nargs = parser.parse_args()\r\n\r\n# Consolidate all checklists into one big checklist object\r\ndef get_consolidated_checklist(input_folder, language):\r\n # Initialize checklist object\r\n checklist_master_data = {\r\n 'items': [],\r\n 'metadata': {\r\n 'name': 'Master checklist',\r\n 'timestamp': datetime.date.today().strftime(\"%B %d, %Y\")\r\n }\r\n }\r\n # Find all files in the input folder matching the pattern \"language*.json\"\r\n if args.verbose:\r\n print(\"DEBUG: looking for JSON files in folder\", input_folder, \"with pattern *.\", language + \".json...\")\r\n checklist_files = glob.glob(input_folder + \"/*.\" + language + \".json\")\r\n if args.verbose:\r\n print(\"DEBUG: found\", len(checklist_files), \"JSON files\")\r\n for checklist_file in checklist_files:\r\n # Get JSON\r\n try:\r\n with open(checklist_file) as f:\r\n checklist_data = json.load(f)\r\n if args.verbose:\r\n print(\"DEBUG: JSON file\", checklist_file, \"loaded successfully with {0} items\".format(len(checklist_data[\"items\"])))\r\n for item in checklist_data[\"items\"]:\r\n # Add field with the name of the checklist\r\n item[\"checklist\"] = 
checklist_data[\"metadata\"][\"name\"]\r\n # Add items to the master checklist\r\n checklist_master_data['items'] += checklist_data['items']\r\n # Replace the master checklist severities and status sections (for a given language they should be all the same)\r\n checklist_master_data['severities'] = checklist_data['severities']\r\n checklist_master_data['status'] = checklist_data['status']\r\n except Exception as e:\r\n print(\"ERROR: Error when processing JSON file\", checklist_file, \"-\", str(e))\r\n if args.verbose:\r\n print(\"DEBUG: master checklist contains\", len(checklist_master_data[\"items\"]), \"items\")\r\n return checklist_master_data\r\n\r\n# Dump JSON object to file\r\ndef dump_json_file(json_object, filename):\r\n if args.verbose:\r\n print(\"DEBUG: dumping JSON object to file\", filename)\r\n json_string = json.dumps(json_object, sort_keys=True, ensure_ascii=False, indent=4, separators=(',', ': '))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n f.write(json_string)\r\n f.close()\r\n\r\n# Main function\r\ndef update_excel_file(input_excel_file, output_excel_file, checklist_data):\r\n # Constants\r\n worksheet_checklist_name = 'Checklist'\r\n row1 = 8 # First row after which the Excel spreadsheet will be updated\r\n col_checklist_name = \"A\"\r\n row_checklist_name = \"4\"\r\n guid_column_index = \"L\"\r\n comment_column_index = \"G\"\r\n sample_cell_index = 'A4'\r\n col_checklist=\"A\"\r\n col_area = \"B\"\r\n col_subarea = \"C\"\r\n col_check = \"D\"\r\n col_desc = \"E\"\r\n col_sev = \"F\"\r\n col_status = \"G\"\r\n col_comment = \"H\"\r\n col_link = \"I\"\r\n col_training = \"J\"\r\n col_arg_success = \"K\"\r\n col_arg_failure = \"L\"\r\n col_guid = \"M\"\r\n info_link_text = 'More info'\r\n training_link_text = 'Training'\r\n worksheet_values_name = 'Values'\r\n values_row1 = 2\r\n col_values_severity = \"A\"\r\n col_values_status = \"B\"\r\n col_values_area = \"C\"\r\n col_values_description = \"H\"\r\n\r\n # Load workbook\r\n try:\r\n wb = load_workbook(filename = input_excel_file)\r\n if args.verbose:\r\n print(\"DEBUG: workbook\", input_excel_file, \"opened successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when opening Excel file\", input_excel_file, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Get worksheet\r\n try:\r\n ws = wb[worksheet_checklist_name]\r\n if args.verbose:\r\n print(\"DEBUG: worksheet\", worksheet_checklist_name, \"selected successfully\")\r\n except Exception as e:\r\n print(\"ERROR: Error when selecting worksheet\", worksheet_checklist_name, \"-\", str(e))\r\n sys.exit(1)\r\n\r\n # Set checklist name\r\n try:\r", "type": "random" } ]
[ " dump_json_file(checklist_master_data, json_output_file)\r", " update_excel_file(args.excel_file, xlsx_output_file, checklist_master_data)\r", " checklist_master_data = get_consolidated_checklist(args.input_folder, args.language)\r", " wb.save(output_excel_file)\r", " status_list = checklist_data.get(\"status\")\r", " default_status = status_list[0].get(\"name\")\r", " for item in checklist_data.get(\"items\"):\r", " checklist_name = item.get(\"checklist\")\r", " guid = item.get(\"guid\")\r", " category = item.get(\"category\")\r", " subcategory = item.get(\"subcategory\")\r", " text = item.get(\"text\")\r", " description = item.get(\"description\")\r", " severity = item.get(\"severity\")\r", " link = item.get(\"link\")\r", " training = item.get(\"training\")\r", " graph_query_success = item.get(\"graph_success\")\r", " graph_query_failure = item.get(\"graph_failure\")\r", " checklist_data = json.load(f)\r", " for item in checklist_data.get(\"status\"):\r", " status = item.get(\"name\")\r", " for item in checklist_data.get(\"severities\"):\r", " severity = item.get(\"name\")\r", " # Get worksheet\r", " print(\"DEBUG: saving workbook\", output_excel_file)\r", "######################################################################\r", " # else:\r", " # Replace the master checklist severities and status sections (for a given language they should be all the same)\r", " ws[col_guid + str(row_counter)].value = guid\r", " guid_column_index = \"L\"\r", " col_subarea = \"C\"\r", "parser.add_argument('--language', dest='language', action='store', default='en',\r", " ws[col_checklist_name + row_checklist_name] = checklist_data[\"metadata\"][\"name\"]\r" ]
METASEP
16
qiboteam__qibocal
qiboteam__qibocal METASEP doc/source/conf.py METASEP # -*- coding: utf-8 -*- # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.transform import AutoStructify sys.path.insert(0, os.path.abspath("..")) import qcvv # -- Project information ----------------------------------------------------- project = "qcvv" copyright = "2022, The Qibo team" author = "The Qibo team" # The full version, including alpha/beta/rc tags release = qcvv.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Markdown configuration # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"} autosectionlabel_prefix_document = True # Allow to embed rst syntax in markdown files. enable_eval_rst = True # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
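# NOTE: html_static_path is left empty below, so no extra static assets are
# copied and the theme's built-in files are used as-is.
# NOTE: because enable_eval_rst is True and the setup() hook further down
# registers recommonmark's AutoStructify transform, the Markdown sources can
# embed reStructuredText through ``eval_rst`` fenced blocks, e.g. (illustrative
# example, not taken from the actual docs):
#
#   ```eval_rst
#   .. automodule:: qcvv.data
#      :members:
#   ```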
html_static_path = [] # -- Intersphinx ------------------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Autodoc ------------------------------------------------------------------ # autodoc_member_order = "bysource" # Adapted this from # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py # app setup hook def setup(app): app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True) app.add_transform(AutoStructify) serverscripts/qcvv-update-on-change.py METASEP #!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import curio import inotify.adapters import inotify.constants from curio import subprocess async def main(folder, exe_args): i = inotify.adapters.Inotify() i.add_watch(folder) for event in i.event_gen(yield_nones=False): if event is not None: (header, _, _, _) = event if ( (header.mask & inotify.constants.IN_CREATE) or (header.mask & inotify.constants.IN_DELETE) or (header.mask & inotify.constants.IN_MODIFY) ): await subprocess.run(exe_args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("folder") parser.add_argument("exe_args", nargs="+") args = parser.parse_args() curio.run(main(args.folder, args.exe_args)) serverscripts/qcvv-index.reports.py METASEP # -*- coding: utf-8 -*- """qcvv-index-reports.py Generates a JSON index with reports information. """ import json import pathlib import sys from collections import ChainMap import yaml ROOT = "/home/users/qcvv/qcvv-reports" ROOT_URL = "http://login.qrccluster.com:9000/" OUT = "/home/users/qcvv/qcvv-reports/index.json" DEFAULTS = { "title": "-", "date": "-", "platform": "-", "start-time": "-", "end-time": "-", } REQUIRED_FILE_METADATA = {"title", "date", "platform", "start-time" "end-time"} def meta_from_path(p): meta = ChainMap(DEFAULTS) yaml_meta = p / "meta.yml" yaml_res = {} if yaml_meta.exists(): with yaml_meta.open() as f: try: yaml_res = yaml.safe_load(f) except yaml.YAMLError as e: print(f"Error processing {yaml_meta}: {e}", file=sys.stderr) meta = meta.new_child(yaml_res) return meta def register(p): path_meta = meta_from_path(p) title, date, platform, start_time, end_time = ( path_meta["title"], path_meta["date"], path_meta["platform"], path_meta["start-time"], path_meta["end-time"], ) url = ROOT_URL + p.name titlelink = f'<a href="{url}">{title}</a>' return (titlelink, date, platform, start_time, end_time) def make_index(): root_path = pathlib.Path(ROOT) data = [] for p in root_path.iterdir(): if p.is_dir(): try: res = register(p) data.append(res) except: print("Error processing folder", p, file=sys.stderr) raise with open(OUT, "w") as f: json.dump({"data": data}, f) if __name__ == "__main__": make_index() src/qcvv/web/server.py METASEP # -*- coding: utf-8 -*- import os import pathlib import yaml from flask import Flask, render_template from qcvv import __version__ from qcvv.cli.builders import ReportBuilder server = Flask(__name__) @server.route("/") @server.route("/data/<path>") def page(path=None): folders = [ folder for folder in reversed(sorted(os.listdir(os.getcwd()))) if os.path.isdir(folder) and "meta.yml" in os.listdir(folder) ] report = None if path is not None: try: report = ReportBuilder(path) except (FileNotFoundError, TypeError): pass return render_template( "template.html", version=__version__, folders=folders, report=report, ) src/qcvv/web/report.py METASEP # -*- coding: utf-8 -*- import os import pathlib from jinja2 import 
Environment, FileSystemLoader from qcvv import __version__ from qcvv.cli.builders import ReportBuilder def create_report(path): """Creates an HTML report for the data in the given path.""" filepath = pathlib.Path(__file__) with open(os.path.join(filepath.with_name("static"), "styles.css"), "r") as file: css_styles = f"<style>\n{file.read()}\n</style>" report = ReportBuilder(path) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") html = template.render( is_static=True, css_styles=css_styles, version=__version__, report=report, ) with open(os.path.join(path, "index.html"), "w") as file: file.write(html) src/qcvv/web/app.py METASEP # -*- coding: utf-8 -*- import os import pandas as pd import yaml from dash import Dash, Input, Output, dcc, html from qcvv import plots from qcvv.data import Dataset from qcvv.web.server import server Dataset() # dummy dataset call to suppress ``pint[V]`` error app = Dash( server=server, suppress_callback_exceptions=True, ) app.layout = html.Div( [ dcc.Location(id="url", refresh=False), dcc.Graph(id="graph", figure={}), dcc.Interval( id="interval", # TODO: Perhaps the user should be allowed to change the refresh rate interval=1000, n_intervals=0, disabled=False, ), ] ) @app.callback( Output("graph", "figure"), Input("interval", "n_intervals"), Input("graph", "figure"), Input("url", "pathname"), ) def get_graph(n, current_figure, url): method, folder, routine, qubit, format = url.split(os.sep)[2:] try: # data = Dataset.load_data(folder, routine, format, "precision_sweep") # with open(f"{folder}/platform.yml", "r") as f: # nqubits = yaml.safe_load(f)["nqubits"] # if len(data) > 2: # params, fit = resonator_spectroscopy_fit(folder, format, nqubits) # else: # params, fit = None, None # return getattr(plots.resonator_spectroscopy, method)(data, params, fit) # # FIXME: Temporarily hardcode the plotting method to test # # multiple routines with different names in one folder # # should be changed to: # # return getattr(getattr(plots, routine), method)(data) return getattr(plots, method)(folder, routine, qubit, format) except (FileNotFoundError, pd.errors.EmptyDataError): return current_figure src/qcvv/web/__init__.py METASEP src/qcvv/tests/test_data.py METASEP # -*- coding: utf-8 -*- """Some tests for the Dataset class""" import tempfile import numpy as np import pytest from pint import DimensionalityError, UndefinedUnitError from qcvv.data import Dataset def random_dataset(length): data = Dataset() for _ in range(length): msr, i, q, phase = np.random.rand(len(data.df.columns)) data.add({"MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) return data def test_data_initialization(): """Test Dataset constructor""" data = Dataset() assert len(data.df.columns) == 4 assert list(data.df.columns) == ["MSR", "i", "q", "phase"] data1 = Dataset(quantities={"attenuation": "dB"}) assert len(data1.df.columns) == 5 assert list(data1.df.columns) == ["attenuation", "MSR", "i", "q", "phase"] def test_units(): """Test units of measure in Dataset""" data = Dataset() assert data.df.MSR.values.units == "volt" data1 = Dataset(quantities={"frequency": "Hz"}) assert data1.df.frequency.values.units == "hertz" with pytest.raises(UndefinedUnitError): data2 = Dataset(quantities={"fake_unit": "fake"}) def test_add(): """Test add method of Dataset""" data = random_dataset(5) assert len(data) == 5 data1 = Dataset(quantities={"attenuation": "dB"}) msr, i, q, phase, att = np.random.rand(len(data1.df.columns)) data1.add( { 
"MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "attenuation[dB]": att, } ) assert len(data1) == 1 data1.add( { "MSR[V]": 0, "i[V]": 0.0, "q[V]": 0.0, "phase[deg]": 0, "attenuation[dB]": 1, } ) assert len(data1) == 2 data2 = Dataset() msr, i, q, phase = np.random.rand(len(data2.df.columns)) with pytest.raises(DimensionalityError): data2.add({"MSR[dB]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) with pytest.raises(UndefinedUnitError): data2.add({"MSR[test]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) src/qcvv/plots/scatters.py METASEP # -*- coding: utf-8 -*- import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qcvv.data import Data, Dataset from qcvv.fitting.utils import exp, flipping, lorenzian, rabi, ramsey def frequency_msr_phase__fast_precision(folder, routine, qubit, format): try: data_fast = Dataset.load_data(folder, routine, format, f"fast_sweep_q{qubit}") except: data_fast = Dataset(quantities={"frequency": "Hz"}) try: data_precision = Dataset.load_data( folder, routine, format, f"precision_sweep_q{qubit}" ) except: data_precision = Dataset(quantities={"frequency": "Hz"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("MSR", "uV"), name="Fast", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("phase", "rad"), name="Fast", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("MSR", "uV"), name="Precision", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("phase", "rad"), name="Precision", ), row=1, col=2, ) if len(data_fast) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_fast.get_values("frequency", "GHz")), max(data_fast.get_values("frequency", "GHz")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.df["fit_amplitude"][0], data_fit.df["fit_center"][0], data_fit.df["fit_sigma"][0], data_fit.df["fit_offset"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig def frequency_attenuation_msr_phase__cut(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot fig = go.Figure() # index data on a specific attenuation value smalldf = 
data.df[data.get_values("attenuation", "dB") == plot1d_attenuation].copy() # split multiple software averages to different datasets datasets = [] while len(smalldf): datasets.append(smalldf.drop_duplicates("frequency")) smalldf.drop(datasets[-1].index, inplace=True) fig.add_trace( go.Scatter( x=datasets[-1]["frequency"].pint.to("GHz").pint.magnitude, y=datasets[-1]["MSR"].pint.to("V").pint.magnitude, ), ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting, xaxis_title="Frequency (GHz)", yaxis_title="MSR (V)", ) return fig # For Rabi oscillations def time_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) # add annotation for label[0] -> pi_pulse_duration fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> rabi_oscillations_pi_pulse_max_voltage fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> rabi_oscillations_pi_pulse_max_voltage fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig def gain_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"gain", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi 
Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("gain", "dimensionless")), max(data.get_values("gain", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> pi_pulse_gain fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gain (dimensionless)", yaxis_title="MSR (uV)", ) return fig def amplitude_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"amplitude", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("amplitude", "dimensionless")), max(data.get_values("amplitude", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> pi_pulse_gain fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Amplitude (dimensionless)", yaxis_title="MSR (uV)", ) return fig # For Ramsey oscillations def time_msr(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, 
subplot_titles=("MSR (V)",), ) fig.add_trace( go.Scatter( x=data.get_values("wait", "ns"), y=data.get_values("MSR", "uV"), name="Ramsey", ), row=1, col=1, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("wait", "ns")), max(data.get_values("wait", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=ramsey( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.3f} Hz", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", ) return fig # T1 def t1_time_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="T1", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="T1", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=exp( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig # Flipping def flips_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"flips": "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("MSR", "uV"), 
name="T1", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("phase", "rad"), name="T1", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("flips", "dimensionless")), max(data.get_values("flips", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=flipping( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Flips (dimensionless)", yaxis_title="MSR (uV)", xaxis2_title="Flips (dimensionless)", yaxis2_title="Phase (rad)", ) return fig # For calibrate qubit states def exc_gnd(folder, routine, qubit, format): import os.path file_exc = f"{folder}/data/{routine}/data_exc_q{qubit}.csv" if os.path.exists(file_exc): data_exc = Dataset.load_data(folder, routine, format, f"data_exc_q{qubit}") fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=("Calibrate qubit states",), ) fig.add_trace( go.Scatter( x=data_exc.get_values("i", "V"), y=data_exc.get_values("q", "V"), name="exc_state", mode="markers", marker=dict(size=3, color="lightcoral"), ), row=1, col=1, ) file_gnd = f"{folder}/data/{routine}/data_gnd_q{qubit}.csv" if os.path.exists(file_gnd): data_gnd = Dataset.load_data(folder, routine, format, f"data_gnd_q{qubit}") fig.add_trace( go.Scatter( x=data_gnd.get_values("i", "V"), y=data_gnd.get_values("q", "V"), name="gnd state", mode="markers", marker=dict(size=3, color="skyblue"), ), row=1, col=1, ) file_exc = f"{folder}/data/{routine}/data_exc_q{qubit}.csv" if os.path.exists(file_exc): i_exc = data_exc.get_values("i", "V") q_exc = data_exc.get_values("q", "V") i_mean_exc = i_exc.mean() q_mean_exc = q_exc.mean() iq_mean_exc = complex(i_mean_exc, q_mean_exc) mod_iq_exc = abs(iq_mean_exc) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_exc], y=[q_mean_exc], name=f" state1_voltage: {mod_iq_exc} <br> mean_exc_state: {iq_mean_exc}", mode="markers", marker=dict(size=10, color="red"), ), row=1, col=1, ) file_gnd = f"{folder}/data/{routine}/data_gnd_q{qubit}.csv" if os.path.exists(file_gnd): i_gnd = data_gnd.get_values("i", "V") q_gnd = data_gnd.get_values("q", "V") i_mean_gnd = i_gnd.mean() q_mean_gnd = q_gnd.mean() iq_mean_gnd = complex(i_mean_gnd, q_mean_gnd) mod_iq_gnd = abs(iq_mean_gnd) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_gnd], y=[q_mean_gnd], name=f" state0_voltage: {mod_iq_gnd} <br> mean_gnd_state: {iq_mean_gnd}", mode="markers", marker=dict(size=10, color="blue"), ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="i (V)", yaxis_title="q (V)", width=1000 ) return fig src/qcvv/plots/heatmaps.py METASEP # -*- coding: utf-8 -*- import os.path import plotly.graph_objects as go from 
plotly.subplots import make_subplots from qcvv.data import Dataset def frequency_flux_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Current (A)", xaxis2_title="Frequency (GHz)", yaxis2_title="Current (A)", ) return fig def frequency_attenuation_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Attenuation (dB)", xaxis2_title="Frequency (GHz)", yaxis2_title="Attenuation (dB)", ) return fig def frequency_flux_msr_phase__matrix(folder, routine, qubit, format): fluxes = [] for i in range(25): # FIXME: 25 is hardcoded file = f"{folder}/data/{routine}/data_q{qubit}_f{i}.csv" if os.path.exists(file): fluxes += [i] if len(fluxes) < 1: nb = 1 else: nb = len(fluxes) fig = make_subplots( rows=2, cols=nb, horizontal_spacing=0.1, vertical_spacing=0.1, x_title="Frequency (Hz)", y_title="Current (A)", shared_xaxes=True, shared_yaxes=True, ) for j in fluxes: if j == fluxes[-1]: showscale = True else: showscale = False data = Dataset.load_data(folder, routine, format, f"data_q{qubit}_f{j}") fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), showscale=showscale, ), row=1, col=j, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), showscale=showscale, ), row=2, col=j, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting ) return fig def duration_gain_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting 
xaxis_title="duration (ns)", yaxis_title="gain (dimensionless)", xaxis2_title="duration (ns)", yaxis2_title="gain (dimensionless)", ) return fig def duration_amplitude_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="duration (ns)", yaxis_title="amplitude (dimensionless)", xaxis2_title="duration (ns)", yaxis2_title="amplitude (dimensionless)", ) return fig src/qcvv/plots/__init__.py METASEP # -*- coding: utf-8 -*- from qcvv.plots.heatmaps import * from qcvv.plots.scatters import * src/qcvv/fitting/utils.py METASEP # -*- coding: utf-8 -*- import re import numpy as np def lorenzian(frequency, amplitude, center, sigma, offset): # http://openafox.com/science/peak-function-derivations.html return (amplitude / np.pi) * ( sigma / ((frequency - center) ** 2 + sigma**2) ) + offset def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] # return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] # return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def exp(x, *p): return p[0] - p[1] * np.exp(-1 * x * p[2]) def flipping(x, p0, p1, p2, p3): # A fit to Flipping Qubit oscillation # Epsilon?? 
shoule be Amplitude : p[0] # Offset : p[1] # Period of oscillation : p[2] # phase for the first point corresponding to pi/2 rotation : p[3] return np.sin(x * 2 * np.pi / p2 + p3) * p0 + p1 # return p0 * np.sin(p3 + (2 * np.pi * x) / p2) + p1 def parse(key): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) return name, unit src/qcvv/fitting/methods.py METASEP # -*- coding: utf-8 -*- """Routine-specific method for post-processing data acquired.""" import lmfit import numpy as np from scipy.optimize import curve_fit from qcvv.config import log from qcvv.data import Data from qcvv.fitting.utils import exp, flipping, lorenzian, parse, rabi, ramsey def lorentzian_fit(data, x, y, qubit, nqubits, labels): """Fitting routine for resonator spectroscopy""" data_fit = Data( name=f"fit_q{qubit}", quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", labels[1], labels[0], ], ) frequencies = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) # Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(lorenzian) # Guess parameters for Lorentzian max or min if (nqubits == 1 and labels[0] == "resonator_freq") or ( nqubits != 1 and labels[0] == "qubit_freq" ): guess_center = frequencies[ np.argmax(voltages) ] # Argmax = Returns the indices of the maximum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center) guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi else: guess_center = frequencies[ np.argmin(voltages) ] # Argmin = Returns the indices of the minimum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center) guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi # Add guessed parameters to the model model_Q.set_param_hint("center", value=guess_center, vary=True) model_Q.set_param_hint("sigma", value=guess_sigma, vary=True) model_Q.set_param_hint("amplitude", value=guess_amp, vary=True) model_Q.set_param_hint("offset", value=guess_offset, vary=True) guess_parameters = model_Q.make_params() # fit the model with the data and guessed parameters try: fit_res = model_Q.fit( data=voltages, frequency=frequencies, params=guess_parameters ) except: log.warning("The fitting was not successful") return data_fit # get the values for postprocessing and for legend. 
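    # Post-processing of the fitted Lorentzian parameters (the code below):
    # - f0 is the fitted center, i.e. the frequency of the peak/dip;
    # - BW is taken as 2*sigma, the full width at half maximum of a Lorentzian;
    # - Q is the quality factor |f0 / BW|;
    # - peak_voltage is the Lorentzian evaluated at its center,
    #   amplitude / (sigma * pi) + offset;
    # - freq rescales f0 by 1e6, which assumes the sweep axis is expressed in MHz
    #   so that the reported value is in Hz.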
f0 = fit_res.best_values["center"] BW = fit_res.best_values["sigma"] * 2 Q = abs(f0 / BW) peak_voltage = ( fit_res.best_values["amplitude"] / (fit_res.best_values["sigma"] * np.pi) + fit_res.best_values["offset"] ) freq = f0 * 1e6 data_fit.add( { labels[1]: peak_voltage, labels[0]: freq, "fit_amplitude": fit_res.best_values["amplitude"], "fit_center": fit_res.best_values["center"], "fit_sigma": fit_res.best_values["sigma"], "fit_offset": fit_res.best_values["offset"], } ) return data_fit def rabi_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 0.1e-6, ] else: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmax(voltages.values)], np.pi / 2, 0.1e-6, ] try: popt, pcov = curve_fit( rabi, time.values, voltages.values, p0=pguess, maxfev=10000 ) smooth_dataset = rabi(time.values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6 t1 = 1.0 / popt[4] # double check T1 except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: pi_pulse_duration, labels[1]: rabi_oscillations_pi_pulse_max_voltage, labels[2]: t1, } ) return data_fit def ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 500e-9, ] try: popt, pcov = curve_fit( ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000 ) delta_fitting = popt[2] delta_phys = int((delta_fitting * sampling_rate) - offset_freq) corrected_qubit_frequency = int(qubit_freq - delta_phys) t2 = 1.0 / popt[4] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: delta_phys, labels[1]: corrected_qubit_frequency, labels[2]: t2, } ) return data_fit def t1_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", labels[0], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ max(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] else: pguess = [ min(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] try: popt, pcov = curve_fit( exp, time.values, voltages.values, p0=pguess, maxfev=2000000 ) t1 = abs(1 / popt[2]) except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], labels[0]: t1, } ) return data_fit def flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], labels[1], ], ) flips = 
data.get_values(*parse(x)) # Check X data stores. N flips or i? voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter else: pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter try: popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000) epsilon = -np.pi / popt[2] amplitude_delta = np.pi / (np.pi + epsilon) corrected_amplitude = amplitude_delta * pi_pulse_amplitude # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter) # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: amplitude_delta, labels[1]: corrected_amplitude, } ) return data_fit src/qcvv/fitting/__init__.py METASEP src/qcvv/cli/builders.py METASEP # -*- coding: utf-8 -*- import datetime import inspect import os import shutil import yaml from qcvv import calibrations from qcvv.config import log, raise_error from qcvv.data import Data def load_yaml(path): """Load yaml file from disk.""" with open(path, "r") as file: data = yaml.safe_load(file) return data class ActionBuilder: """Class for parsing and executing runcards. Args: runcard (path): path containing the runcard. folder (path): path for the output folder. force (bool): option to overwrite the output folder if it exists already. """ def __init__(self, runcard, folder=None, force=False): path, self.folder = self._generate_output_folder(folder, force) self.runcard = load_yaml(runcard) platform_name = self.runcard["platform"] self._allocate_platform(platform_name) self.qubits = self.runcard["qubits"] self.format = self.runcard["format"] # Saving runcard self.save_runcards(path, runcard) self.save_meta(path, self.folder, platform_name) @staticmethod def _generate_output_folder(folder, force): """Static method for generating the output folder. Args: folder (path): path for the output folder. If None it will be created a folder automatically force (bool): option to overwrite the output folder if it exists already. 
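        Returns:
            path (str): absolute path of the newly created output folder.
            folder (str): name of the output folder, relative to the current working directory.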
""" if folder is None: import getpass e = datetime.datetime.now() user = getpass.getuser().replace(".", "-") date = e.strftime("%Y-%m-%d") folder = f"{date}-{'000'}-{user}" num = 0 while os.path.exists(folder): log.warning(f"Directory {folder} already exists.") num += 1 folder = f"{date}-{str(num).rjust(3, '0')}-{user}" log.warning(f"Trying to create directory {folder}") elif os.path.exists(folder) and not force: raise_error(RuntimeError, f"Directory {folder} already exists.") elif os.path.exists(folder) and force: log.warning(f"Deleting previous directory {folder}.") shutil.rmtree(os.path.join(os.getcwd(), folder)) path = os.path.join(os.getcwd(), folder) log.info(f"Creating directory {folder}.") os.makedirs(path) return path, folder def _allocate_platform(self, platform_name): """Allocate the platform using Qibolab.""" from qibo.backends import construct_backend self.platform = construct_backend("qibolab", platform=platform_name).platform def save_runcards(self, path, runcard): """Save the output runcards.""" from qibolab.paths import qibolab_folder platform_runcard = ( qibolab_folder / "runcards" / f"{self.runcard['platform']}.yml" ) shutil.copy(platform_runcard, f"{path}/platform.yml") shutil.copy(runcard, f"{path}/runcard.yml") def save_meta(self, path, folder, platform_name): import qibo import qibolab import qcvv e = datetime.datetime.now(datetime.timezone.utc) meta = {} meta["title"] = folder meta["platform"] = platform_name meta["date"] = e.strftime("%Y-%m-%d") meta["start-time"] = e.strftime("%H:%M:%S") meta["end-time"] = e.strftime("%H:%M:%S") meta["versions"] = { "qibo": qibo.__version__, "qibolab": qibolab.__version__, "qcvv": qcvv.__version__, } with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) def _build_single_action(self, name): """Helper method to parse the actions in the runcard.""" f = getattr(calibrations, name) path = os.path.join(self.folder, f"data/{name}/") os.makedirs(path) sig = inspect.signature(f) params = self.runcard["actions"][name] for param in list(sig.parameters)[2:-1]: if param not in params: raise_error(AttributeError, f"Missing parameter {param} in runcard.") return f, params, path def execute(self): """Method to execute sequentially all the actions in the runcard.""" self.platform.connect() self.platform.setup() self.platform.start() for action in self.runcard["actions"]: routine, args, path = self._build_single_action(action) self._execute_single_action(routine, args, path) self.platform.stop() self.platform.disconnect() def _execute_single_action(self, routine, arguments, path): """Method to execute a single action and retrieving the results.""" for qubit in self.qubits: results = routine(self.platform, qubit, **arguments) if self.format is None: raise_error( ValueError, f"Cannot store data using {self.format} format." 
) for data in results: getattr(data, f"to_{self.format}")(path) self.update_platform_runcard(qubit, routine.__name__) def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data( self.folder, routine, self.format, f"fit_q{qubit}" ) except: data_fit = Data() params = [i for i in list(data_fit.df.keys()) if "fit" not in i] settings = load_yaml(f"{self.folder}/platform.yml") for param in params: settings["characterization"]["single_qubit"][qubit][param] = int( data_fit.df[param][0] ) with open(f"{self.folder}/data/{routine}/platform.yml", "a+") as file: yaml.dump( settings, file, sort_keys=False, indent=4, default_flow_style=None ) def dump_report(self): from qcvv.web.report import create_report # update end time meta = load_yaml(f"{self.folder}/meta.yml") e = datetime.datetime.now(datetime.timezone.utc) meta["end-time"] = e.strftime("%H:%M:%S") with open(f"{self.folder}/meta.yml", "w") as file: yaml.dump(meta, file) create_report(self.folder) class ReportBuilder: """Parses routines and plots to report and live plotting page. Args: path (str): Path to the data folder to generate report for. """ def __init__(self, path): self.path = path self.metadata = load_yaml(os.path.join(path, "meta.yml")) # find proper path title base, self.title = os.path.join(os.getcwd(), path), "" while self.title in ("", "."): base, self.title = os.path.split(base) self.runcard = load_yaml(os.path.join(path, "runcard.yml")) self.format = self.runcard.get("format") self.qubits = self.runcard.get("qubits") # create calibration routine objects # (could be incorporated to :meth:`qcvv.cli.builders.ActionBuilder._build_single_action`) self.routines = [] for action in self.runcard.get("actions"): routine = getattr(calibrations, action) if not hasattr(routine, "plots"): routine.plots = [] self.routines.append(routine) def get_routine_name(self, routine): """Prettify routine's name for report headers.""" return routine.__name__.replace("_", " ").title() def get_figure(self, routine, method, qubit): """Get html figure for report. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ import tempfile figure = method(self.path, routine.__name__, qubit, self.format) with tempfile.NamedTemporaryFile() as temp: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") return fightml def get_live_figure(self, routine, method, qubit): """Get url to dash page for live plotting. This url is used by :meth:`qcvv.web.app.get_graph`. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ return os.path.join( method.__name__, self.path, routine.__name__, str(qubit), self.format, ) src/qcvv/cli/_base.py METASEP # -*- coding: utf-8 -*- """Adds global CLI options.""" import base64 import pathlib import shutil import socket import subprocess import uuid from urllib.parse import urljoin import click from qibo.config import log, raise_error from qcvv.cli.builders import ActionBuilder CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # options for report upload UPLOAD_HOST = ( "qcvv@localhost" if socket.gethostname() == "saadiyat" else "[email protected]" ) TARGET_DIR = "qcvv-reports/" ROOT_URL = "http://login.qrccluster.com:9000/" @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("runcard", metavar="RUNCARD", type=click.Path(exists=True)) @click.option( "folder", "-o", type=click.Path(), help="Output folder. 
If not provided a standard name will generated.", ) @click.option( "force", "-f", is_flag=True, help="Use --force option to overwrite the output folder.", ) def command(runcard, folder, force=None): """qcvv: Quantum Calibration Verification and Validation using Qibo. Arguments: - RUNCARD: runcard with declarative inputs. """ action_builder = ActionBuilder(runcard, folder, force) action_builder.execute() action_builder.dump_report() @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "port", "-p", "--port", default=8050, type=int, help="Localhost port to launch dash server.", ) @click.option( "debug", "-d", "--debug", is_flag=True, help="Launch server in debugging mode.", ) def live_plot(port, debug): """Real time plotting of calibration data on a dash server.""" import socket from qcvv.web.app import app # change port if it is already used while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: if s.connect_ex(("localhost", port)) != 0: break port += 1 app.run_server(debug=debug, port=port) @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("output_folder", metavar="FOLDER", type=click.Path(exists=True)) def upload(output_folder): """Uploads output folder to server""" output_path = pathlib.Path(output_folder) # check the rsync command exists. if not shutil.which("rsync"): raise_error( RuntimeError, "Could not find the rsync command. Please make sure it is installed.", ) # check that we can authentica with a certificate ssh_command_line = ( "ssh", "-o", "PreferredAuthentications=publickey", "-q", UPLOAD_HOST, "exit", ) str_line = " ".join(repr(ele) for ele in ssh_command_line) log.info(f"Checking SSH connection to {UPLOAD_HOST}.") try: subprocess.run(ssh_command_line, check=True) except subprocess.CalledProcessError as e: raise RuntimeError( ( "Could not validate the SSH key. " "The command\n%s\nreturned a non zero exit status. " "Please make sure that your public SSH key is on the server." ) % str_line ) from e except OSError as e: raise RuntimeError( "Could not run the command\n{}\n: {}".format(str_line, e) ) from e log.info("Connection seems OK.") # upload output randname = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode() newdir = TARGET_DIR + randname rsync_command = ( "rsync", "-aLz", "--chmod=ug=rwx,o=rx", f"{output_path}/", f"{UPLOAD_HOST}:{newdir}", ) log.info(f"Uploading output ({output_path}) to {UPLOAD_HOST}") try: subprocess.run(rsync_command, check=True) except subprocess.CalledProcessError as e: msg = f"Failed to upload output: {e}" raise RuntimeError(msg) from e url = urljoin(ROOT_URL, randname) log.info(f"Upload completed. The result is available at:\n{url}") src/qcvv/cli/__init__.py METASEP # -*- coding: utf-8 -*- """CLI entry point.""" from ._base import command, live_plot, upload src/qcvv/decorators.py METASEP # -*- coding: utf-8 -*- """Decorators implementation.""" import os from qcvv.config import raise_error def plot(header, method): """Decorator for adding plots in the report and live plotting page. Args: header (str): Header of the plot to use in the report. method (Callable): Plotting method defined under ``qcvv.plots``. 
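    Example (as used by the routines in ``qcvv.calibrations``, e.g. ``t1``):

        @plot("MSR vs Time", plots.t1_time_msr_phase)
        def t1(platform, qubit, ...):
            ...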
""" def wrapped(f): if hasattr(f, "plots"): # insert in the beginning of the list to have # proper plot ordering in the report f.plots.insert(0, (header, method)) else: f.plots = [(header, method)] return f return wrapped src/qcvv/data.py METASEP # -*- coding: utf-8 -*- """Implementation of Dataset class to store measurements.""" import re from abc import abstractmethod import pandas as pd import pint_pandas from qcvv.config import raise_error class AbstractDataset: def __init__(self, name=None): if name is None: self.name = "data" else: self.name = name self.df = pd.DataFrame() def __add__(self, data): self.df = pd.concat([self.df, data.df], ignore_index=True) return self @abstractmethod def add(self, data): raise_error(NotImplementedError) def __len__(self): """Computes the length of the dataset.""" return len(self.df) @abstractmethod def load_data(cls, folder, routine, format, name): raise_error(NotImplementedError) @abstractmethod def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" if self.quantities == None: self.df.to_csv(f"{path}/{self.name}.csv") else: self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") class Dataset(AbstractDataset): """Class to store the data measured during the calibration routines. It is a wrapper to a pandas DataFrame with units of measure from the Pint library. Args: quantities (dict): dictionary containing additional quantities that the user may save other than the pulse sequence output. The keys are the name of the quantities and the corresponding values are the units of measure. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) self.df = pd.DataFrame( { "MSR": pd.Series(dtype="pint[V]"), "i": pd.Series(dtype="pint[V]"), "q": pd.Series(dtype="pint[V]"), "phase": pd.Series(dtype="pint[deg]"), } ) self.quantities = {"MSR": "V", "i": "V", "q": "V", "phase": "deg"} if quantities is not None: self.quantities.update(quantities) for name, unit in quantities.items(): self.df.insert(0, name, pd.Series(dtype=f"pint[{unit}]")) from pint import UnitRegistry self.ureg = UnitRegistry() def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) # TODO: find a better way to do this self.df.loc[l, name] = value * self.ureg(unit) def get_values(self, quantity, unit): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. unit (str): Unit of the returned values. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity].pint.to(unit).pint.magnitude @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. 
""" obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file, header=[0, 1]) obj.df = obj.df.pint.quantify(level=-1) obj.df.pop("Unnamed: 0_level_0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") class Data(AbstractDataset): """Class to store the data obtained from calibration routines. It is a wrapper to a pandas DataFrame. Args: quantities (dict): dictionary quantities to be saved. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) if quantities is not None: self.quantities = quantities for name in quantities: self.df.insert(0, name, pd.Series(dtype=object)) def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): self.df.loc[l, key] = value def get_values(self, quantity): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity] @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file) obj.df.pop("Unnamed: 0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") src/qcvv/config.py METASEP # -*- coding: utf-8 -*- """Custom logger implemenation.""" import logging import os # Logging level from 0 (all) to 4 (errors) (see https://docs.python.org/3/library/logging.html#logging-levels) QCVV_LOG_LEVEL = 1 if "QCVV_LOG_LEVEL" in os.environ: # pragma: no cover QCVV_LOG_LEVEL = 10 * int(os.environ.get("QCVV_LOG_LEVEL")) def raise_error(exception, message=None, args=None): """Raise exception with logging error. Args: exception (Exception): python exception. message (str): the error message. 
""" log.error(message) if args: raise exception(message, args) else: raise exception(message) # Configuration for logging mechanism class CustomHandler(logging.StreamHandler): """Custom handler for logging algorithm.""" def format(self, record): """Format the record with specific format.""" from qcvv import __version__ fmt = f"[Qcvv {__version__}|%(levelname)s|%(asctime)s]: %(message)s" return logging.Formatter(fmt, datefmt="%Y-%m-%d %H:%M:%S").format(record) # allocate logger object log = logging.getLogger(__name__) log.setLevel(QCVV_LOG_LEVEL) log.addHandler(CustomHandler()) src/qcvv/__init__.py METASEP # -*- coding: utf-8 -*- from .cli import command, live_plot, upload """qcvv: Quantum Calibration Verification and Validation using Qibo.""" import importlib.metadata as im __version__ = im.version(__package__) src/qcvv/calibrations/utils.py METASEP # -*- coding: utf-8 -*- import numpy as np def variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ): """Helper function for sweeps.""" return np.concatenate( ( np.arange(-lowres_width, -highres_width, lowres_step), np.arange(-highres_width, highres_width, highres_step), np.arange(highres_width, lowres_width, lowres_step), ) ) src/qcvv/calibrations/t1.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import t1_fit @plot("MSR vs Time", plots.t1_time_msr_phase) def t1( platform: AbstractPlatform, qubit, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step, software_averages, points=10, ): sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) ro_wait_range = np.arange( delay_before_readout_start, delay_before_readout_end, delay_before_readout_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) data = Dataset(name=f"data_q{qubit}", quantities={"Time": "ns"}) count = 0 for _ in range(software_averages): for wait in ro_wait_range: if count % points == 0 and count > 0: yield data yield t1_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["t1"], ) ro_pulse.start = qd_pulse.duration + wait msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": wait, } data.add(results) count += 1 yield data src/qcvv/calibrations/resonator_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.calibrations.utils import variable_resolution_scanrange from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def resonator_spectroscopy( platform: AbstractPlatform, qubit, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step, software_averages, 
points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ) + resonator_frequency ) fast_sweep_data = Dataset( name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield fast_sweep_data yield lorentzian_fit( fast_sweep_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } fast_sweep_data.add(results) count += 1 yield fast_sweep_data # FIXME: have live ploting work for multiple datasets saved if platform.resonator_type == "3D": resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) else: resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) precision_sweep__data = Dataset( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(-precision_width, precision_width, precision_step) + resonator_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield precision_sweep__data yield lorentzian_fit( fast_sweep_data + precision_sweep__data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } precision_sweep__data.add(results) count += 1 yield precision_sweep__data @plot("Frequency vs Attenuation", plots.frequency_attenuation_msr_phase) @plot("MSR vs Frequency", plots.frequency_attenuation_msr_phase__cut) def resonator_punchout( platform: AbstractPlatform, qubit, freq_width, freq_step, min_att, max_att, step_att, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "attenuation": "dB"} ) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence = PulseSequence() sequence.add(ro_pulse) # TODO: move this explicit instruction to the platform resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency - (freq_width / 4) ) attenuation_range = np.flip(np.arange(min_att, max_att, step_att)) count = 0 for _ in range(software_averages): for att in attenuation_range: for freq in frequency_range: if count % points == 0: yield data # TODO: move these explicit instructions to the platform platform.ro_port[qubit].lo_frequency = freq - 
ro_pulse.frequency platform.ro_port[qubit].attenuation = att msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr * (np.exp(att / 10)), "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "attenuation[dB]": att, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Flux Current", plots.frequency_flux_msr_phase) def resonator_spectroscopy_flux( platform: AbstractPlatform, qubit, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline=0, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data # TODO: automatically extract the sweet spot current # TODO: add a method to generate the matrix @plot("MSR row 1 and Phase row 2", plots.frequency_flux_msr_phase__matrix) def resonator_spectroscopy_flux_matrix( platform: AbstractPlatform, qubit, freq_width, freq_step, current_min, current_max, current_step, fluxlines, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = np.arange(current_min, current_max, current_step) count = 0 for fluxline in fluxlines: fluxline = int(fluxline) print(fluxline) data = Dataset( name=f"data_q{qubit}_f{fluxline}", quantities={"frequency": "Hz", "current": "A"}, ) for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qcvv/calibrations/ramsey.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import ramsey_fit @plot("MSR vs Time", plots.time_msr) def ramsey_frequency_detuned( platform: 
AbstractPlatform, qubit, t_start, t_end, t_step, n_osc, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) runcard_qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] runcard_T2 = platform.characterization["single_qubit"][qubit]["T2"] intermediate_freq = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "frequency" ] current_qubit_freq = runcard_qubit_freq current_T2 = runcard_T2 # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) t_end = np.array(t_end) for t_max in t_end: count = 0 platform.qd_port[qubit].lo_frequency = current_qubit_freq - intermediate_freq offset_freq = n_osc / t_max * sampling_rate # Hz t_range = np.arange(t_start, t_max, t_step) for wait in t_range: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait RX90_pulse2.relative_phase = ( (RX90_pulse2.start / sampling_rate) * (2 * np.pi) * (-offset_freq) ) ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "wait[ns]": wait, "t_max[ns]": t_max, } data.add(results) count += 1 # # Fitting data_fit = ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) new_t2 = data_fit.get_values("t2") corrected_qubit_freq = data_fit.get_values("corrected_qubit_frequency") # if ((new_t2 * 3.5) > t_max): if (new_t2 > current_T2).bool() and len(t_end) > 1: current_qubit_freq = int(corrected_qubit_freq) current_T2 = new_t2 data = Dataset( name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"} ) else: corrected_qubit_freq = int(current_qubit_freq) new_t2 = current_T2 break yield data @plot("MSR vs Time", plots.time_msr) def ramsey( platform: AbstractPlatform, qubit, delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, software_averages, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) waits = np.arange( delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, ) # FIXME: Waiting to be able to pass qpucard to qibolab 
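    # The readout and drive local oscillators are set so that LO + pulse frequency
    # matches the calibrated resonator and qubit frequencies taken from the
    # platform characterization (same workaround as in the routines above).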
platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) count = 0 for _ in range(software_averages): for wait in waits: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=qubit_freq, sampling_rate=sampling_rate, offset_freq=0, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "wait[ns]": wait, "t_max[ns]": np.array(delay_between_pulses_end), } data.add(results) count += 1 yield data src/qcvv/calibrations/rabi_oscillations.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import rabi_fit @plot("MSR vs Time", plots.time_msr_phase) def rabi_pulse_length( platform: AbstractPlatform, qubit, pulse_duration_start, pulse_duration_end, pulse_duration_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"Time": "ns"}) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_duration", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": duration, } data.add(results) count += 1 yield data @plot("MSR vs Gain", plots.gain_msr_phase) def rabi_pulse_gain( platform: AbstractPlatform, qubit, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"gain": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - 
ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="gain[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_gain", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs Amplitude", plots.amplitude_msr_phase) def rabi_pulse_amplitude( platform, qubit, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"amplitude": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="amplitude[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_amplitude", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data @plot("MSR vs length and gain", plots.duration_gain_msr_phase) def rabi_pulse_length_and_gain( platform: AbstractPlatform, qubit, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"duration": "ns", "gain": "dimensionless"} ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain 
= gain if count % points == 0 and count > 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs length and amplitude", plots.duration_amplitude_msr_phase) def rabi_pulse_length_and_amplitude( platform, qubit, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"duration": "ns", "amplitude": "dimensionless"}, ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data src/qcvv/calibrations/qubit_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def qubit_spectroscopy( platform: AbstractPlatform, qubit, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] freqrange = np.arange(fast_start, fast_end, fast_step) + qubit_frequency data = Dataset(quantities={"frequency": "Hz", "attenuation": "dB"}) # FIXME: Waiting for Qblox platform to take care of that platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) data = Dataset(name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield data yield lorentzian_fit( data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) 
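            # For every scanned frequency the drive LO is moved so that
            # LO + qd_pulse.frequency equals `freq`, the sequence is executed and
            # MSR/i/q/phase are stored together with the swept frequency.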
platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data.add(results) count += 1 yield data if platform.resonator_type == "3D": qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) else: qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) prec_data = Dataset( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(precision_start, precision_end, precision_step) + qubit_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield prec_data yield lorentzian_fit( data + prec_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } prec_data.add(results) count += 1 yield prec_data # TODO: Estimate avg_voltage correctly @plot("MSR and Phase vs Frequency", plots.frequency_flux_msr_phase) def qubit_spectroscopy_flux( platform: AbstractPlatform, qubit, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = np.arange(-freq_width, freq_width, freq_step) + qubit_frequency current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qcvv/calibrations/flipping.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import flipping_fit @plot("MSR vs Flips", plots.flips_msr_phase) def flipping( platform: AbstractPlatform, qubit, niter, step, points=10, ): platform.reload_settings() pi_pulse_amplitude = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "amplitude" ] data = Dataset(name=f"data_q{qubit}", 
quantities={"flips": "dimensionless"}) sequence = PulseSequence() RX90_pulse = platform.create_RX90_pulse(qubit, start=0) count = 0 # repeat N iter times for n in range(0, niter, step): if count % points == 0 and count > 0: yield data yield flipping_fit( data, x="flips[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], niter=niter, pi_pulse_amplitude=pi_pulse_amplitude, labels=["amplitude_delta", "corrected_amplitude"], ) sequence.add(RX90_pulse) # execute sequence RX(pi/2) - [RX(pi) - RX(pi)] from 0...n times - RO start1 = RX90_pulse.duration for j in range(n): RX_pulse1 = platform.create_RX_pulse(qubit, start=start1) start2 = start1 + RX_pulse1.duration RX_pulse2 = platform.create_RX_pulse(qubit, start=start2) sequence.add(RX_pulse1) sequence.add(RX_pulse2) start1 = start2 + RX_pulse2.duration # add ro pulse at the end of the sequence ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start1) sequence.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ro_pulse.serial] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "flips[dimensionless]": np.array(n), } data.add(results) count += 1 sequence = PulseSequence() yield data src/qcvv/calibrations/calibrate_qubit_states.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot @plot("exc vs gnd", plots.exc_gnd) def calibrate_qubit_states_binning( platform: AbstractPlatform, qubit, niter, points=10, ): platform.reload_settings() platform.qrm[qubit].ports['i1'].hardware_demod_en = True # binning only works with hardware demodulation enabled # create exc sequence exc_sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.duration) exc_sequence.add(RX_pulse) exc_sequence.add(ro_pulse) data_exc = Dataset(name=f"data_exc_q{qubit}", quantities={"iteration": "dimensionless"}) shots_results = platform.execute_pulse_sequence(exc_sequence, nshots=niter)['shots'][ro_pulse.serial] for n in np.arange(niter): msr, phase, i, q = shots_results[n] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "iteration[dimensionless]": n, } data_exc.add(results) yield data_exc gnd_sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) gnd_sequence.add(ro_pulse) data_gnd = Dataset(name=f"data_gnd_q{qubit}", quantities={"iteration": "dimensionless"}) shots_results = platform.execute_pulse_sequence(gnd_sequence, nshots=niter)['shots'][ro_pulse.serial] for n in np.arange(niter): msr, phase, i, q = shots_results[n] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "iteration[dimensionless]": n, } data_gnd.add(results) yield data_gnd src/qcvv/calibrations/__init__.py METASEP # -*- coding: utf-8 -*- from qcvv.calibrations.flipping import * from qcvv.calibrations.qubit_spectroscopy import * from qcvv.calibrations.rabi_oscillations import * from qcvv.calibrations.ramsey import * from qcvv.calibrations.resonator_spectroscopy import * from qcvv.calibrations.t1 import * from qcvv.calibrations.calibrate_qubit_states import * src/qcvv/calibrations/allXY.py METASEP
[ { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data", "type": "infile" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n 
[\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": 
np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=RY_drag_pulse.finish, beta=beta_param\n )\n\n # Ry(pi) - Rx(pi/2) - Ro\n seq2 = PulseSequence()\n seq2.add(RY_drag_pulse)\n seq2.add(RX90_drag_pulse)\n seq2.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n 
platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n 
quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n 
platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n 
platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n 
nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=RY_drag_pulse.finish, beta=beta_param\n )\n\n # Ry(pi) - Rx(pi/2) - Ro\n seq2 = PulseSequence()\n seq2.add(RY_drag_pulse)\n seq2.add(RX90_drag_pulse)\n seq2.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq2.add(results)\n count += 1\n\n # save IQ_module and beta param of each iteration\n yield data_seq1\n yield data_seq2\n\n # beta_optimal = fit_drag_tunning(res1, res2, beta_params)\n # print(beta_optimal)\n\n\ndef _get_sequence_from_gate_pair(platform, gates, qubit, beta_param):\n sampling_rate = platform.sampling_rate\n pulse_frequency = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"frequency\"\n ]\n pulse_duration = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"duration\"\n ]\n # All gates have equal pulse duration\n\n sequence = PulseSequence()\n\n sequenceDuration = 0\n pulse_start = 0\n\n for gate in gates:\n if gate == \"I\":\n # print(\"Transforming to sequence I gate\")\n pass\n\n if gate == \"RX(pi)\":\n # print(\"Transforming to sequence RX(pi) gate\")\n if beta_param == None:\n RX_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", 
plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY 
rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n 
beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, 
phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n 
seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=RY_drag_pulse.finish, beta=beta_param\n )\n\n # Ry(pi) - Rx(pi/2) - Ro\n seq2 = PulseSequence()\n seq2.add(RY_drag_pulse)\n seq2.add(RX90_drag_pulse)\n seq2.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq2.add(results)\n count += 1\n\n # save IQ_module and beta param of each iteration\n yield data_seq1\n yield data_seq2\n\n # beta_optimal = fit_drag_tunning(res1, res2, beta_params)\n # print(beta_optimal)\n\n\ndef _get_sequence_from_gate_pair(platform, gates, qubit, beta_param):\n sampling_rate = platform.sampling_rate\n pulse_frequency = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"frequency\"\n ]\n pulse_duration = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"duration\"\n ]\n # All gates have equal pulse duration\n\n sequence = PulseSequence()\n\n sequenceDuration = 0\n pulse_start = 0\n\n for gate in gates:\n if gate == \"I\":\n # print(\"Transforming to sequence I gate\")\n pass\n\n if gate == \"RX(pi)\":\n # print(\"Transforming to sequence RX(pi) gate\")\n if beta_param == None:\n RX_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )\n sequence.add(RX_pulse)\n\n if gate == \"RX(pi/2)\":\n # print(\"Transforming to sequence RX(pi/2) gate\")\n if beta_param == None:\n RX90_pulse = platform.create_RX90_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX90_pulse = platform.create_RX90_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n 
platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse 
RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=RY_drag_pulse.finish, beta=beta_param\n )\n\n # Ry(pi) - Rx(pi/2) - Ro\n seq2 = PulseSequence()\n seq2.add(RY_drag_pulse)\n seq2.add(RX90_drag_pulse)\n seq2.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq2.add(results)\n count += 1\n\n # save IQ_module and beta param of each iteration\n yield data_seq1\n yield data_seq2\n\n # beta_optimal = fit_drag_tunning(res1, res2, beta_params)\n # print(beta_optimal)\n\n\ndef _get_sequence_from_gate_pair(platform, gates, qubit, beta_param):\n sampling_rate = platform.sampling_rate\n pulse_frequency = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"frequency\"\n ]\n pulse_duration = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"duration\"\n ]\n # All gates have equal pulse duration\n\n sequence = PulseSequence()\n\n sequenceDuration = 0\n pulse_start = 0\n\n for gate in gates:\n if gate == \"I\":\n # print(\"Transforming to sequence I gate\")\n pass\n\n if gate == \"RX(pi)\":\n # print(\"Transforming to sequence RX(pi) gate\")\n if beta_param == None:\n RX_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )\n sequence.add(RX_pulse)\n\n if gate == \"RX(pi/2)\":\n # print(\"Transforming to sequence RX(pi/2) gate\")\n if beta_param == None:\n RX90_pulse = platform.create_RX90_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX90_pulse = platform.create_RX90_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )\n sequence.add(RX90_pulse)\n\n if gate == \"RY(pi)\":\n # print(\"Transforming to sequence RY(pi) gate\")\n if beta_param == None:\n RY_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n relative_phase=np.pi / 2,\n )\n else:\n RY_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n relative_phase=np.pi / 2,\n beta=beta_param,\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n 
[\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 
1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": 
# -*- coding: utf-8 -*-
import numpy as np
from qibolab.platforms.abstract import AbstractPlatform
from qibolab.pulses import PulseSequence

from qcvv import plots
from qcvv.data import Dataset
from qcvv.decorators import plot
from qcvv.fitting.methods import drag_tunning_fit

# allXY rotations
gatelist = [
    ["I", "I"],
    ["RX(pi)", "RX(pi)"],
    ["RY(pi)", "RY(pi)"],
    ["RX(pi)", "RY(pi)"],
    ["RY(pi)", "RX(pi)"],
    ["RX(pi/2)", "I"],
    ["RY(pi/2)", "I"],
    ["RX(pi/2)", "RY(pi/2)"],
    ["RY(pi/2)", "RX(pi/2)"],
    ["RX(pi/2)", "RY(pi)"],
    ["RY(pi/2)", "RX(pi)"],
    ["RX(pi)", "RY(pi/2)"],
    ["RY(pi)", "RX(pi/2)"],
    ["RX(pi/2)", "RX(pi)"],
    ["RX(pi)", "RX(pi/2)"],
    ["RY(pi/2)", "RY(pi)"],
    ["RY(pi)", "RY(pi/2)"],
    ["RX(pi)", "I"],
    ["RY(pi)", "I"],
    ["RX(pi/2)", "RX(pi/2)"],
    ["RY(pi/2)", "RY(pi/2)"],
]


@plot("Prob vs gate sequence", plots.prob_gate)
def allXY(
    platform: AbstractPlatform,
    qubit,
    beta_param=None,
    software_averages=1,
    points=10,
):
    platform.reload_settings()
    state0_voltage = complex(
        platform.characterization["single_qubit"][qubit]["state0_voltage"]
    )
    state1_voltage = complex(
        platform.characterization["single_qubit"][qubit]["state1_voltage"]
    )

    data = Dataset(
        name=f"data_q{qubit}",
        quantities={"probability": "dimensionless", "gateNumber": "dimensionless"},
    )

    count = 0
    for _ in range(software_averages):
        gateNumber = 1
        for gates in gatelist:
            if count % points == 0 and count > 0:
                yield data
            seq, ro_pulse = _get_sequence_from_gate_pair(
                platform, gates, qubit, beta_param
            )
            seq.add(ro_pulse)
            msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[
                ro_pulse.serial
            ]

            if platform.resonator_type == "3D":
                prob = np.abs(msr * 1e6 - state1_voltage) / (
                    state0_voltage - state1_voltage
                )
                prob = (2 * prob) - 1
            else:
                prob = np.abs(msr * 1e6 - state1_voltage) / (
                    state1_voltage - state0_voltage
                )
                prob = (2 * prob) - 1

            results = {
                "MSR[V]": msr,
                "i[V]": i,
                "q[V]": q,
                "phase[rad]": phase,
                "probability[dimensionless]": prob,
                "gateNumber[dimensionless]": np.array(gateNumber),
            }
            data.add(results)
            count += 1
            gateNumber += 1
    yield data


@plot("Prob vs gate sequence", plots.prob_gate_iteration)
def allXY_iteration(
    platform: AbstractPlatform,
    qubit,
    beta_start,
    beta_end,
    beta_step,
    software_averages=1,
    points=10,
):
    platform.reload_settings()

    state0_voltage = complex(
        platform.characterization["single_qubit"][qubit]["state0_voltage"]
    )
    state1_voltage = complex(
        platform.characterization["single_qubit"][qubit]["state1_voltage"]
    )

    data = Dataset(
        name=f"data_q{qubit}",
        quantities={
            "probability": "dimensionless",
            "gateNumber": "dimensionless",
            "beta_param": "dimensionless",
        },
    )

    count = 0
    for _ in range(software_averages):
        for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):
            gateNumber = 1
            for gates in gatelist:
                if count % points == 0 and count > 0:
                    yield data
                seq, ro_pulse = _get_sequence_from_gate_pair(
                    platform, gates, qubit, beta_param
                )
                seq.add(ro_pulse)
                msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[
                    ro_pulse.serial
                ]

                if platform.resonator_type == "3D":
                    prob = np.abs(msr * 1e6 - state1_voltage) / (
                        state0_voltage - state1_voltage
                    )
                    prob = (2 * prob) - 1
                else:
                    prob = np.abs(msr * 1e6 - state1_voltage) / (
                        state1_voltage - state0_voltage
                    )
                    prob = (2 * prob) - 1

                results = {
                    "MSR[V]": msr,
                    "i[V]": i,
                    "q[V]": q,
                    "phase[rad]": phase,
                    "probability[dimensionless]": prob,
                    "gateNumber[dimensionless]": np.array(gateNumber),
                    "beta_param[dimensionless]": np.array(beta_param),
                }
                data.add(results)
                count += 1
                gateNumber += 1
    yield data


@plot("MSR vs beta parameter", plots.msr_beta)
def drag_pulse_tunning(
    platform: AbstractPlatform,
    qubit,
    beta_start,
    beta_end,
    beta_step,
    points=10,
):
    platform.reload_settings()

    data_seq1 = Dataset(
        name=f"data_seq1_q{qubit}", quantities={"beta_param": "dimensionless"}
    )
    data_seq2 = Dataset(
        name=f"data_seq2_q{qubit}", quantities={"beta_param": "dimensionless"}
    )

    count = 0
    for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):
        if count % points == 0 and count > 0:
            yield data_seq1
            yield data_seq2
            yield drag_tunning_fit(
                data_seq1,
                data_seq2,
                x="beta_param[dimensionless]",
                y="MSR[uV]",
                qubit=qubit,
                nqubits=platform.settings["nqubits"],
                labels=[
                    "optimal_beta_param",
                    "optimal_beta_param_y",
                ],
            )
        # drag pulse RX(pi/2)
        RX90_drag_pulse = platform.create_RX90_drag_pulse(
            qubit, start=0, beta=beta_param
        )
        # drag pulse RY(pi)
        RY_drag_pulse = platform.create_RX_drag_pulse(
            qubit,
            start=RX90_drag_pulse.finish,
            relative_phase=+np.pi / 2,
            beta=beta_param,
        )
        # RO pulse
        ro_pulse = platform.create_qubit_readout_pulse(
            qubit, start=RY_drag_pulse.finish
        )

        # Rx(pi/2) - Ry(pi) - Ro
        seq1 = PulseSequence()
        seq1.add(RX90_drag_pulse)
        seq1.add(RY_drag_pulse)
        seq1.add(ro_pulse)
        msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[
            ro_pulse.serial
        ]
        results = {
            "MSR[V]": msr,
            "i[V]": i,
            "q[V]": q,
            "phase[deg]": phase,
            "beta_param[dimensionless]": beta_param,
        }
        data_seq1.add(results)

        # drag pulse RY(pi)
        RY_drag_pulse = platform.create_RX_drag_pulse(
            qubit, start=0, relative_phase=np.pi / 2, beta=beta_param
        )
        # drag pulse RX(pi/2)
        RX90_drag_pulse = platform.create_RX90_drag_pulse(
            qubit, start=RY_drag_pulse.finish, beta=beta_param
        )

        # Ry(pi) - Rx(pi/2) - Ro
        seq2 = PulseSequence()
        seq2.add(RY_drag_pulse)
        seq2.add(RX90_drag_pulse)
        seq2.add(ro_pulse)
        msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[
            ro_pulse.serial
        ]
        results = {
            "MSR[V]": msr,
            "i[V]": i,
            "q[V]": q,
            "phase[deg]": phase,
            "beta_param[dimensionless]": beta_param,
        }
        data_seq2.add(results)
        count += 1

    # save IQ module and beta param of each iteration
    yield data_seq1
    yield data_seq2

    # beta_optimal = fit_drag_tunning(res1, res2, beta_params)
    # print(beta_optimal)


def _get_sequence_from_gate_pair(platform, gates, qubit, beta_param):
    sampling_rate = platform.sampling_rate
    pulse_frequency = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][
        "frequency"
    ]
    pulse_duration = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][
        "duration"
    ]
    # All gates have equal pulse duration

    sequence = PulseSequence()

    sequenceDuration = 0
    pulse_start = 0

    for gate in gates:
        if gate == "I":
            # print("Transforming to sequence I gate")
            pass

        if gate == "RX(pi)":
            # print("Transforming to sequence RX(pi) gate")
            if beta_param is None:
                RX_pulse = platform.create_RX_pulse(
                    qubit,
                    start=pulse_start,
                )
            else:
                RX_pulse = platform.create_RX_drag_pulse(
                    qubit,
                    start=pulse_start,
                    beta=beta_param,
                )
            sequence.add(RX_pulse)

        if gate == "RX(pi/2)":
            # print("Transforming to sequence RX(pi/2) gate")
            if beta_param is None:
                RX90_pulse = platform.create_RX90_pulse(
                    qubit,
                    start=pulse_start,
                )
            else:
                RX90_pulse = platform.create_RX90_drag_pulse(
                    qubit,
                    start=pulse_start,
                    beta=beta_param,
                )
            sequence.add(RX90_pulse)

        if gate == "RY(pi)":
            # print("Transforming to sequence RY(pi) gate")
            if beta_param is None:
                RY_pulse = platform.create_RX_pulse(
                    qubit,
                    start=pulse_start,
                    relative_phase=np.pi / 2,
                )
            else:
                RY_pulse = platform.create_RX_drag_pulse(
                    qubit,
                    start=pulse_start,
                    relative_phase=np.pi / 2,
                    beta=beta_param,
                )
            sequence.add(RY_pulse)

        if gate == "RY(pi/2)":
            # print("Transforming to sequence RY(pi/2) gate")
            if beta_param is None:
                RY90_pulse = platform.create_RX90_pulse(
                    qubit,
                    start=pulse_start,
                    relative_phase=np.pi / 2,
                )
            else:
                RY90_pulse = platform.create_RX90_drag_pulse(
                    qubit,
                    start=pulse_start,
                    relative_phase=np.pi / 2,
                    beta=beta_param,
                )
            sequence.add(RY90_pulse)

        sequenceDuration = sequenceDuration + pulse_duration
        pulse_start = pulse_duration

    # RO pulse starting just after pair of gates
- state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,", "type": "common" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n 
[\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n 
beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(", "type": "common" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type 
== \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(", "type": "common" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots", "type": "non_informative" }, { "content": "# -*- coding: utf-8 
-*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import 
PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n 
\"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = 
_get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) 
- Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in 
range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish\n )\n\n # Rx(pi/2) - Ry(pi) - Ro\n seq1 = PulseSequence()\n seq1.add(RX90_drag_pulse)\n seq1.add(RY_drag_pulse)\n seq1.add(ro_pulse)\n msr, i, q, phase = platform.execute_pulse_sequence(seq1, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq1.add(results)\n\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit, start=0, relative_phase=np.pi / 2, beta=beta_param\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=RY_drag_pulse.finish, beta=beta_param\n )\n\n # Ry(pi) - Rx(pi/2) - Ro\n seq2 = PulseSequence()\n seq2.add(RY_drag_pulse)\n seq2.add(RX90_drag_pulse)\n seq2.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq2, nshots=1024)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[deg]\": phase,\n \"beta_param[dimensionless]\": beta_param,\n }\n data_seq2.add(results)\n count += 1\n\n # save IQ_module and beta param of each iteration\n yield data_seq1\n yield data_seq2\n\n # beta_optimal = fit_drag_tunning(res1, res2, beta_params)\n # print(beta_optimal)\n\n\ndef _get_sequence_from_gate_pair(platform, gates, qubit, beta_param):\n sampling_rate = platform.sampling_rate\n pulse_frequency = 
platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"frequency\"\n ]\n pulse_duration = platform.settings[\"native_gates\"][\"single_qubit\"][qubit][\"RX\"][\n \"duration\"\n ]\n # All gates have equal pulse duration\n\n sequence = PulseSequence()\n\n sequenceDuration = 0\n pulse_start = 0\n\n for gate in gates:\n if gate == \"I\":\n # print(\"Transforming to sequence I gate\")\n pass\n\n if gate == \"RX(pi)\":\n # print(\"Transforming to sequence RX(pi) gate\")\n if beta_param == None:\n RX_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )\n sequence.add(RX_pulse)\n\n if gate == \"RX(pi/2)\":\n # print(\"Transforming to sequence RX(pi/2) gate\")\n if beta_param == None:\n RX90_pulse = platform.create_RX90_pulse(\n qubit,\n start=pulse_start,\n )\n else:\n RX90_pulse = platform.create_RX90_drag_pulse(\n qubit,\n start=pulse_start,\n beta=beta_param,\n )\n sequence.add(RX90_pulse)\n\n if gate == \"RY(pi)\":\n # print(\"Transforming to sequence RY(pi) gate\")\n if beta_param == None:\n RY_pulse = platform.create_RX_pulse(\n qubit,\n start=pulse_start,\n relative_phase=np.pi / 2,\n )\n else:\n RY_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=pulse_start,\n relative_phase=np.pi / 2,\n beta=beta_param,\n )\n sequence.add(RY_pulse)\n\n if gate == \"RY(pi/2)\":\n # print(\"Transforming to sequence RY(pi/2) gate\")\n if beta_param == None:", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - 
state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(", 
"type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import drag_tunning_fit\n\n# allXY rotations\ngatelist = [\n [\"I\", \"I\"],\n [\"RX(pi)\", \"RX(pi)\"],\n [\"RY(pi)\", \"RY(pi)\"],\n [\"RX(pi)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RX(pi)\"],\n [\"RX(pi/2)\", \"I\"],\n [\"RY(pi/2)\", \"I\"],\n [\"RX(pi/2)\", \"RY(pi/2)\"],\n [\"RY(pi/2)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RY(pi)\"],\n [\"RY(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RY(pi/2)\"],\n [\"RY(pi)\", \"RX(pi/2)\"],\n [\"RX(pi/2)\", \"RX(pi)\"],\n [\"RX(pi)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi)\"],\n [\"RY(pi)\", \"RY(pi/2)\"],\n [\"RX(pi)\", \"I\"],\n [\"RY(pi)\", \"I\"],\n [\"RX(pi/2)\", \"RX(pi/2)\"],\n [\"RY(pi/2)\", \"RY(pi/2)\"],\n]\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate)\ndef allXY(\n platform: AbstractPlatform,\n qubit,\n beta_param=None,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"probability\": \"dimensionless\", \"gateNumber\": \"dimensionless\"},\n )\n\n count = 0\n for _ in range(software_averages):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = 
_get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)\ndef allXY_iteration(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n software_averages=1,\n points=10,\n):\n platform.reload_settings()\n\n state0_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state0_voltage\"]\n )\n state1_voltage = complex(\n platform.characterization[\"single_qubit\"][qubit][\"state1_voltage\"]\n )\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\n \"probability\": \"dimensionless\",\n \"gateNumber\": \"dimensionless\",\n \"beta_param\": \"dimensionless\",\n },\n )\n\n count = 0\n for _ in range(software_averages):\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(1):\n gateNumber = 1\n for gates in gatelist:\n if count % points == 0 and count > 0:\n yield data\n seq, ro_pulse = _get_sequence_from_gate_pair(\n platform, gates, qubit, beta_param\n )\n seq.add(ro_pulse)\n msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[\n ro_pulse.serial\n ]\n\n if platform.resonator_type == \"3D\":\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state0_voltage - state1_voltage\n )\n prob = (2 * prob) - 1\n\n else:\n prob = np.abs(msr * 1e6 - state1_voltage) / (\n state1_voltage - state0_voltage\n )\n prob = (2 * prob) - 1\n\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"probability[dimensionless]\": prob,\n \"gateNumber[dimensionless]\": np.array(gateNumber),\n \"beta_param[dimensionless]\": np.array(beta_param),\n }\n data.add(results)\n count += 1\n gateNumber += 1\n yield data\n\n\n@plot(\"MSR vs beta parameter\", plots.msr_beta)\ndef drag_pulse_tunning(\n platform: AbstractPlatform,\n qubit,\n beta_start,\n beta_end,\n beta_step,\n points=10,\n):\n\n platform.reload_settings()\n\n data_seq1 = Dataset(\n name=f\"data_seq1_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n data_seq2 = Dataset(\n name=f\"data_seq2_q{qubit}\", quantities={\"beta_param\": \"dimensionless\"}\n )\n\n count = 0\n for beta_param in np.arange(beta_start, beta_end, beta_step).round(4):\n if count % points == 0 and count > 0:\n yield data_seq1\n yield data_seq2\n yield drag_tunning_fit(\n data_seq1,\n data_seq2,\n x=\"beta_param[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"optimal_beta_param\",\n \"optimal_beta_param_y\",\n ],\n )\n # drag pulse RX(pi/2)\n RX90_drag_pulse = platform.create_RX90_drag_pulse(\n qubit, start=0, beta=beta_param\n )\n # drag pulse RY(pi)\n RY_drag_pulse = platform.create_RX_drag_pulse(\n qubit,\n start=RX90_drag_pulse.finish,\n relative_phase=+np.pi / 2,\n beta=beta_param,\n )\n # RO pulse\n ro_pulse = platform.create_qubit_readout_pulse(\n qubit, start=RY_drag_pulse.finish", "type": "random" 
} ]
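A minimal, self-contained sketch of the probability normalization that the allXY and allXY_iteration routines in the entries above apply to each acquisition, pulled out of the acquisition loop for clarity. The calibration voltages and the example call values below are placeholders, not data from any real platform.

# Hedged sketch: the MSR -> probability mapping used by allXY/allXY_iteration above.
# state0_voltage / state1_voltage stand in for the calibrated readout voltages
# (platform.characterization["single_qubit"][qubit][...]); the numbers below are made up.
import numpy as np

def msr_to_probability(msr, state0_voltage, state1_voltage, resonator_type="3D"):
    # MSR is measured in V; the calibrated voltages are compared against MSR in uV.
    msr_uv = msr * 1e6
    if resonator_type == "3D":
        prob = np.abs(msr_uv - state1_voltage) / (state0_voltage - state1_voltage)
    else:
        prob = np.abs(msr_uv - state1_voltage) / (state1_voltage - state0_voltage)
    # Rescale from [0, 1] onto the [-1, 1] axis used by the "Prob vs gate sequence" plots.
    return (2 * prob) - 1

# Example with placeholder calibration values:
print(msr_to_probability(msr=3.2e-6, state0_voltage=4.0, state1_voltage=2.0))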
[ " seq, ro_pulse = _get_sequence_from_gate_pair(", " data_seq2.add(results)", " seq.add(ro_pulse)", " data.add(results)", "@plot(\"Prob vs gate sequence\", plots.prob_gate)", "@plot(\"MSR vs beta parameter\", plots.msr_beta)", " sequence.add(RX_pulse)", " data_seq1 = Dataset(", " data = Dataset(", " data_seq2 = Dataset(", " sequence.add(RX90_pulse)", " sequence.add(RY_pulse)", " seq1.add(RX90_drag_pulse)", " seq1.add(RY_drag_pulse)", " seq1.add(ro_pulse)", "@plot(\"Prob vs gate sequence\", plots.prob_gate_iteration)", " sequence.add(RY90_pulse)", " data_seq1.add(results)", " seq2.add(RY_drag_pulse)", " seq2.add(RX90_drag_pulse)", " seq2.add(ro_pulse)", " start=pulse_start,", " ro_pulse = platform.create_qubit_readout_pulse(qubit, start=sequenceDuration + 4)", " qubit, start=0, beta=beta_param", " qubit, start=0, relative_phase=np.pi / 2, beta=beta_param", " start=RX90_drag_pulse.finish,", " qubit, start=RY_drag_pulse.finish, beta=beta_param", " qubit, start=RY_drag_pulse.finish", "from qcvv.data import Dataset", "", " # drag pulse RY(pi)", " # drag pulse RX(pi/2)", " RY90_pulse = platform.create_RX90_pulse(", " yield data_seq2", " platform: AbstractPlatform,", " prob = np.abs(msr * 1e6 - state1_voltage) / (", " )" ]
METASEP
16
qiboteam__qibocal
qiboteam__qibocal METASEP doc/source/conf.py METASEP # -*- coding: utf-8 -*- # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.transform import AutoStructify sys.path.insert(0, os.path.abspath("..")) import qibocal # -- Project information ----------------------------------------------------- project = "qibocal" copyright = "2022, The Qibo team" author = "The Qibo team" # The full version, including alpha/beta/rc tags release = qibocal.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Markdown configuration # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"} autosectionlabel_prefix_document = True # Allow to embed rst syntax in markdown files. enable_eval_rst = True # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] # -- Intersphinx ------------------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Autodoc ------------------------------------------------------------------ # autodoc_member_order = "bysource" # Adapted this from # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py # app setup hook def setup(app): app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True) app.add_transform(AutoStructify) app.add_css_file("css/style.css") serverscripts/qibocal-update-on-change.py METASEP #!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import curio import inotify.adapters import inotify.constants from curio import subprocess async def main(folder, exe_args): i = inotify.adapters.Inotify() i.add_watch(folder) for event in i.event_gen(yield_nones=False): if event is not None: (header, _, _, _) = event if ( (header.mask & inotify.constants.IN_CREATE) or (header.mask & inotify.constants.IN_DELETE) or (header.mask & inotify.constants.IN_MODIFY) ): await subprocess.run(exe_args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("folder") parser.add_argument("exe_args", nargs="+") args = parser.parse_args() curio.run(main(args.folder, args.exe_args)) serverscripts/qibocal-index-reports.py METASEP # -*- coding: utf-8 -*- """qibocal-index-reports.py Generates a JSON index with reports information. """ import json import pathlib import sys from collections import ChainMap import yaml ROOT = "/home/users/qibocal/qibocal-reports" ROOT_URL = "http://login.qrccluster.com:9000/" OUT = "/home/users/qibocal/qibocal-reports/index.json" DEFAULTS = { "title": "-", "date": "-", "platform": "-", "start-time": "-", "end-time": "-", } REQUIRED_FILE_METADATA = {"title", "date", "platform", "start-time" "end-time"} def meta_from_path(p): meta = ChainMap(DEFAULTS) yaml_meta = p / "meta.yml" yaml_res = {} if yaml_meta.exists(): with yaml_meta.open() as f: try: yaml_res = yaml.safe_load(f) except yaml.YAMLError as e: print(f"Error processing {yaml_meta}: {e}", file=sys.stderr) meta = meta.new_child(yaml_res) return meta def register(p): path_meta = meta_from_path(p) title, date, platform, start_time, end_time = ( path_meta["title"], path_meta["date"], path_meta["platform"], path_meta["start-time"], path_meta["end-time"], ) url = ROOT_URL + p.name titlelink = f'<a href="{url}">{title}</a>' return (titlelink, date, platform, start_time, end_time) def make_index(): root_path = pathlib.Path(ROOT) data = [] for p in root_path.iterdir(): if p.is_dir(): try: res = register(p) data.append(res) except: print("Error processing folder", p, file=sys.stderr) raise with open(OUT, "w") as f: json.dump({"data": data}, f) if __name__ == "__main__": make_index() src/qibocal/calibrations/protocols/test.py METASEP # -*- coding: utf-8 -*- from qibo import gates, models from qibocal.data import Data def test( platform, qubit: list, nshots, points=1, ): data = Data("test", quantities=["nshots", "probabilities"]) nqubits = len(qubit) circuit = models.Circuit(nqubits) circuit.add(gates.H(qubit[0])) circuit.add(gates.H(qubit[1])) # circuit.add(gates.H(1)) circuit.add(gates.M(*qubit)) execution = circuit(nshots=nshots) data.add({"nshots": nshots, "probabilities": execution.probabilities()}) yield data src/qibocal/calibrations/protocols/__init__.py METASEP src/qibocal/calibrations/characterization/utils.py METASEP # -*- coding: utf-8 -*- import 
numpy as np def variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ): """Helper function for sweeps.""" return np.concatenate( ( np.arange(-lowres_width, -highres_width, lowres_step), np.arange(-highres_width, highres_width, highres_step), np.arange(highres_width, lowres_width, lowres_step), ) ) src/qibocal/calibrations/characterization/t1.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import t1_fit @plot("MSR vs Time", plots.t1_time_msr_phase) def t1( platform: AbstractPlatform, qubit: int, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step, software_averages, points=10, ): sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) ro_wait_range = np.arange( delay_before_readout_start, delay_before_readout_end, delay_before_readout_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"Time": "ns"}) count = 0 for _ in range(software_averages): for wait in ro_wait_range: if count % points == 0 and count > 0: yield data yield t1_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["t1"], ) ro_pulse.start = qd_pulse.duration + wait msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": wait, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/resonator_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.calibrations.characterization.utils import variable_resolution_scanrange from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def resonator_spectroscopy( platform: AbstractPlatform, qubit: int, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ) + resonator_frequency ) fast_sweep_data = DataUnits( name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield fast_sweep_data yield lorentzian_fit( fast_sweep_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) 
platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } fast_sweep_data.add(results) count += 1 yield fast_sweep_data if platform.resonator_type == "3D": resonator_frequency = fast_sweep_data.get_values("frequency", "Hz")[ np.argmax(fast_sweep_data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( fast_sweep_data.get_values("MSR", "V")[: (lowres_width // lowres_step)] ) * 1e6 ) else: resonator_frequency = fast_sweep_data.get_values("frequency", "Hz")[ np.argmin(fast_sweep_data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( fast_sweep_data.get_values("MSR", "V")[: (lowres_width // lowres_step)] ) * 1e6 ) precision_sweep__data = DataUnits( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(-precision_width, precision_width, precision_step) + resonator_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield precision_sweep__data yield lorentzian_fit( fast_sweep_data + precision_sweep__data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } precision_sweep__data.add(results) count += 1 yield precision_sweep__data @plot("Frequency vs Attenuation", plots.frequency_attenuation_msr_phase) @plot("MSR vs Frequency", plots.frequency_attenuation_msr_phase__cut) def resonator_punchout( platform: AbstractPlatform, qubit: int, freq_width, freq_step, min_att, max_att, step_att, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"frequency": "Hz", "attenuation": "dB"} ) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence = PulseSequence() sequence.add(ro_pulse) # TODO: move this explicit instruction to the platform resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency - (freq_width / 4) ) attenuation_range = np.flip(np.arange(min_att, max_att, step_att)) count = 0 for _ in range(software_averages): for att in attenuation_range: for freq in frequency_range: if count % points == 0: yield data # TODO: move these explicit instructions to the platform platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.ro_port[qubit].attenuation = att msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr * (np.exp(att / 10)), "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "attenuation[dB]": att, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Flux Current", plots.frequency_flux_msr_phase) def resonator_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline=0, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) data = DataUnits( 
name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data # TODO: automatically extract the sweet spot current # TODO: add a method to generate the matrix @plot("MSR row 1 and Phase row 2", plots.frequency_flux_msr_phase__matrix) def resonator_spectroscopy_flux_matrix( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_min, current_max, current_step, fluxlines, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = np.arange(current_min, current_max, current_step) count = 0 for fluxline in fluxlines: fluxline = int(fluxline) print(fluxline) data = DataUnits( name=f"data_q{qubit}_f{fluxline}", quantities={"frequency": "Hz", "current": "A"}, ) for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Frequency", plots.dispersive_frequency_msr_phase) def dispersive_shift( platform: AbstractPlatform, qubit: int, freq_width, freq_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) data_spec = DataUnits(name=f"data_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_spec yield lorentzian_fit( data_spec, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_spec.add(results) count += 1 yield data_spec # Shifted 
Spectroscopy sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.finish) sequence.add(RX_pulse) sequence.add(ro_pulse) data_shifted = DataUnits( name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_shifted yield lorentzian_fit( data_shifted, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], fit_file_name="fit_shifted", ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_shifted.add(results) count += 1 yield data_shifted src/qibocal/calibrations/characterization/ramsey.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import ramsey_fit @plot("MSR vs Time", plots.time_msr) def ramsey_frequency_detuned( platform: AbstractPlatform, qubit: int, t_start, t_end, t_step, n_osc, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate data = DataUnits(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) runcard_qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] runcard_T2 = platform.characterization["single_qubit"][qubit]["T2"] intermediate_freq = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "frequency" ] current_qubit_freq = runcard_qubit_freq current_T2 = runcard_T2 # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) t_end = np.array(t_end) for t_max in t_end: count = 0 platform.qd_port[qubit].lo_frequency = current_qubit_freq - intermediate_freq offset_freq = n_osc / t_max * sampling_rate # Hz t_range = np.arange(t_start, t_max, t_step) for wait in t_range: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait RX90_pulse2.relative_phase = ( (RX90_pulse2.start / sampling_rate) * (2 * np.pi) * (-offset_freq) ) ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "wait[ns]": wait, "t_max[ns]": t_max, } data.add(results) count += 1 # # Fitting data_fit = ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, 
sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) new_t2 = data_fit.get_values("t2") corrected_qubit_freq = data_fit.get_values("corrected_qubit_frequency") # if ((new_t2 * 3.5) > t_max): if (new_t2 > current_T2).bool() and len(t_end) > 1: current_qubit_freq = int(corrected_qubit_freq) current_T2 = new_t2 data = DataUnits( name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"} ) else: corrected_qubit_freq = int(current_qubit_freq) new_t2 = current_T2 break yield data @plot("MSR vs Time", plots.time_msr) def ramsey( platform: AbstractPlatform, qubit: int, delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, software_averages, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) waits = np.arange( delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) count = 0 for _ in range(software_averages): for wait in waits: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=qubit_freq, sampling_rate=sampling_rate, offset_freq=0, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "wait[ns]": wait, "t_max[ns]": delay_between_pulses_end, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/rabi_oscillations.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import rabi_fit @plot("MSR vs Time", plots.time_msr_phase) def rabi_pulse_length( platform: AbstractPlatform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"Time": "ns"}) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( 
platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_duration", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": duration, } data.add(results) count += 1 yield data @plot("MSR vs Gain", plots.gain_msr_phase) def rabi_pulse_gain( platform: AbstractPlatform, qubit: int, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"gain": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="gain[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_gain", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs Amplitude", plots.amplitude_msr_phase) def rabi_pulse_amplitude( platform, qubit: int, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"amplitude": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="amplitude[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_amplitude", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "amplitude[dimensionless]": amplitude, 
} data.add(results) count += 1 yield data @plot("MSR vs length and gain", plots.duration_gain_msr_phase) def rabi_pulse_length_and_gain( platform: AbstractPlatform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"duration": "ns", "gain": "dimensionless"} ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs length and amplitude", plots.duration_amplitude_msr_phase) def rabi_pulse_length_and_amplitude( platform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"duration": "ns", "amplitude": "dimensionless"}, ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/qubit_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import 
plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def qubit_spectroscopy( platform: AbstractPlatform, qubit: int, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] freqrange = np.arange(fast_start, fast_end, fast_step) + qubit_frequency data = DataUnits(quantities={"frequency": "Hz", "attenuation": "dB"}) # FIXME: Waiting for Qblox platform to take care of that platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) data = DataUnits(name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield data yield lorentzian_fit( data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data.add(results) count += 1 yield data if platform.resonator_type == "3D": qubit_frequency = data.get_values("frequency", "Hz")[ np.argmin(data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( data.get_values("MSR", "V")[: ((fast_end - fast_start) // fast_step)] ) * 1e6 ) else: qubit_frequency = data.get_values("frequency", "Hz")[ np.argmax(data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( data.get_values("MSR", "V")[: ((fast_end - fast_start) // fast_step)] ) * 1e6 ) prec_data = DataUnits( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(precision_start, precision_end, precision_step) + qubit_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield prec_data yield lorentzian_fit( data + prec_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } prec_data.add(results) count += 1 yield prec_data # TODO: Estimate avg_voltage correctly @plot("MSR and Phase vs Frequency", plots.frequency_flux_msr_phase) def qubit_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) data = DataUnits( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) qubit_frequency = 
platform.characterization["single_qubit"][qubit]["qubit_freq"] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = np.arange(-freq_width, freq_width, freq_step) + qubit_frequency current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/flipping.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import flipping_fit @plot("MSR vs Flips", plots.flips_msr_phase) def flipping( platform: AbstractPlatform, qubit: int, niter, step, points=10, ): platform.reload_settings() pi_pulse_amplitude = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "amplitude" ] data = DataUnits(name=f"data_q{qubit}", quantities={"flips": "dimensionless"}) sequence = PulseSequence() RX90_pulse = platform.create_RX90_pulse(qubit, start=0) count = 0 # repeat N iter times for n in range(0, niter, step): if count % points == 0 and count > 0: yield data yield flipping_fit( data, x="flips[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], niter=niter, pi_pulse_amplitude=pi_pulse_amplitude, labels=["amplitude_delta", "corrected_amplitude"], ) sequence.add(RX90_pulse) # execute sequence RX(pi/2) - [RX(pi) - RX(pi)] from 0...n times - RO start1 = RX90_pulse.duration for j in range(n): RX_pulse1 = platform.create_RX_pulse(qubit, start=start1) start2 = start1 + RX_pulse1.duration RX_pulse2 = platform.create_RX_pulse(qubit, start=start2) sequence.add(RX_pulse1) sequence.add(RX_pulse2) start1 = start2 + RX_pulse2.duration # add ro pulse at the end of the sequence ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start1) sequence.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ro_pulse.serial] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "flips[dimensionless]": n, } data.add(results) count += 1 sequence = PulseSequence() yield data src/qibocal/calibrations/characterization/calibrate_qubit_states.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot @plot("exc vs gnd", plots.exc_gnd) def calibrate_qubit_states( platform: AbstractPlatform, qubit: int, niter, points=10, ): # create exc sequence exc_sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.duration) exc_sequence.add(RX_pulse) exc_sequence.add(ro_pulse) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) 
platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX_pulse.frequency ) data_exc = DataUnits(name=f"data_exc_q{qubit}", quantities={"iteration": "s"}) count = 0 for n in np.arange(niter): if count % points == 0: yield data_exc msr, phase, i, q = platform.execute_pulse_sequence(exc_sequence, nshots=1)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "iteration[s]": n, } data_exc.add(results) count += 1 yield data_exc gnd_sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) gnd_sequence.add(ro_pulse) data_gnd = DataUnits(name=f"data_gnd_q{qubit}", quantities={"iteration": "s"}) count = 0 for n in np.arange(niter): if count % points == 0: yield data_gnd msr, phase, i, q = platform.execute_pulse_sequence(gnd_sequence, nshots=1)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "iteration[s]": n, } data_gnd.add(results) count += 1 yield data_gnd src/qibocal/calibrations/characterization/allXY.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import drag_tunning_fit # allXY rotations gatelist = [ ["I", "I"], ["RX(pi)", "RX(pi)"], ["RY(pi)", "RY(pi)"], ["RX(pi)", "RY(pi)"], ["RY(pi)", "RX(pi)"], ["RX(pi/2)", "I"], ["RY(pi/2)", "I"], ["RX(pi/2)", "RY(pi/2)"], ["RY(pi/2)", "RX(pi/2)"], ["RX(pi/2)", "RY(pi)"], ["RY(pi/2)", "RX(pi)"], ["RX(pi)", "RY(pi/2)"], ["RY(pi)", "RX(pi/2)"], ["RX(pi/2)", "RX(pi)"], ["RX(pi)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi)"], ["RY(pi)", "RY(pi/2)"], ["RX(pi)", "I"], ["RY(pi)", "I"], ["RX(pi/2)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi/2)"], ] @plot("Prob vs gate sequence", plots.prob_gate) def allXY( platform: AbstractPlatform, qubit: int, beta_param=None, software_averages=1, points=10, ): state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = DataUnits( name=f"data_q{qubit}", quantities={"probability": "dimensionless", "gateNumber": "dimensionless"}, ) # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) count = 0 for _ in range(software_averages): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=2048)[ ro_pulse.serial ] prob = np.abs(msr * 1e6 - state1_voltage) / np.abs( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": gateNumber, } data.add(results) count += 1 gateNumber += 1 yield data @plot("Prob vs gate sequence", plots.prob_gate_iteration) def allXY_iteration( platform: AbstractPlatform, qubit: 
int, beta_start, beta_end, beta_step, software_averages=1, points=10, ): # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = DataUnits( name=f"data_q{qubit}", quantities={ "probability": "dimensionless", "gateNumber": "dimensionless", "beta_param": "dimensionless", }, ) count = 0 for _ in range(software_averages): for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[ ro_pulse.serial ] prob = np.abs(msr * 1e6 - state1_voltage) / np.abs( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": gateNumber, "beta_param[dimensionless]": beta_param, } data.add(results) count += 1 gateNumber += 1 yield data @plot("MSR vs beta parameter", plots.msr_beta) def drag_pulse_tunning( platform: AbstractPlatform, qubit: int, beta_start, beta_end, beta_step, points=10, ): # platform.reload_settings() # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"beta_param": "dimensionless"}) count = 0 for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): if count % points == 0 and count > 0: yield data yield drag_tunning_fit( data, x="beta_param[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "optimal_beta_param", ], ) # drag pulse RX(pi/2) RX90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=0, beta=beta_param ) # drag pulse RY(pi) RY_drag_pulse = platform.create_RX_drag_pulse( qubit, start=RX90_drag_pulse.finish, relative_phase=+np.pi / 2, beta=beta_param, ) # RO pulse ro_pulse = platform.create_qubit_readout_pulse( qubit, start=RY_drag_pulse.finish ) # Rx(pi/2) - Ry(pi) - Ro seq1 = PulseSequence() seq1.add(RX90_drag_pulse) seq1.add(RY_drag_pulse) seq1.add(ro_pulse) msr1, i1, q1, phase1 = platform.execute_pulse_sequence(seq1)[ro_pulse.serial] # drag pulse RY(pi/2) RY90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=0, relative_phase=np.pi / 2, beta=beta_param ) # drag pulse RX(pi) RX_drag_pulse = platform.create_RX_drag_pulse( qubit, start=RY90_drag_pulse.finish, beta=beta_param ) # Ry(pi/2) - Rx(pi) - Ro seq2 = PulseSequence() seq2.add(RY90_drag_pulse) 
seq2.add(RX_drag_pulse) seq2.add(ro_pulse) msr2, phase2, i2, q2 = platform.execute_pulse_sequence(seq2)[ro_pulse.serial] results = { "MSR[V]": msr1 - msr2, "i[V]": i1 - i2, "q[V]": q1 - q2, "phase[deg]": phase1 - phase2, "beta_param[dimensionless]": beta_param, } data.add(results) count += 1 yield data def _get_sequence_from_gate_pair(platform: AbstractPlatform, gates, qubit, beta_param): pulse_duration = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "duration" ] # All gates have equal pulse duration sequence = PulseSequence() sequenceDuration = 0 pulse_start = 0 for gate in gates: if gate == "I": # print("Transforming to sequence I gate") pass if gate == "RX(pi)": # print("Transforming to sequence RX(pi) gate") if beta_param == None: RX_pulse = platform.create_RX_pulse( qubit, start=pulse_start, ) else: RX_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX_pulse) if gate == "RX(pi/2)": # print("Transforming to sequence RX(pi/2) gate") if beta_param == None: RX90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, ) else: RX90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX90_pulse) if gate == "RY(pi)": # print("Transforming to sequence RY(pi) gate") if beta_param == None: RY_pulse = platform.create_RX_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY_pulse) if gate == "RY(pi/2)": # print("Transforming to sequence RY(pi/2) gate") if beta_param == None: RY90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY90_pulse) sequenceDuration = sequenceDuration + pulse_duration pulse_start = pulse_duration # RO pulse starting just after pair of gates ro_pulse = platform.create_qubit_readout_pulse(qubit, start=sequenceDuration + 4) return sequence, ro_pulse src/qibocal/calibrations/characterization/__init__.py METASEP src/qibocal/web/server.py METASEP # -*- coding: utf-8 -*- import os import pathlib import yaml from flask import Flask, render_template from qibocal import __version__ from qibocal.cli.builders import ReportBuilder server = Flask(__name__) @server.route("/") @server.route("/data/<path>") def page(path=None): folders = [ folder for folder in reversed(sorted(os.listdir(os.getcwd()))) if os.path.isdir(folder) and "meta.yml" in os.listdir(folder) ] report = None if path is not None: try: report = ReportBuilder(path) except (FileNotFoundError, TypeError): pass return render_template( "template.html", version=__version__, folders=folders, report=report, ) src/qibocal/web/report.py METASEP # -*- coding: utf-8 -*- import os import pathlib from jinja2 import Environment, FileSystemLoader from qibocal import __version__ from qibocal.cli.builders import ReportBuilder def create_report(path): """Creates an HTML report for the data in the given path.""" filepath = pathlib.Path(__file__) with open(os.path.join(filepath.with_name("static"), "styles.css")) as file: css_styles = f"<style>\n{file.read()}\n</style>" report = ReportBuilder(path) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") html = template.render( is_static=True, css_styles=css_styles, version=__version__, 
report=report, ) with open(os.path.join(path, "index.html"), "w") as file: file.write(html) src/qibocal/web/app.py METASEP # -*- coding: utf-8 -*- import os import pandas as pd import yaml from dash import Dash, Input, Output, dcc, html from qibocal import plots from qibocal.data import DataUnits from qibocal.web.server import server DataUnits() # dummy dataset call to suppress ``pint[V]`` error app = Dash( server=server, suppress_callback_exceptions=True, ) app.layout = html.Div( [ dcc.Location(id="url", refresh=False), dcc.Graph(id="graph", figure={}), dcc.Interval( id="interval", # TODO: Perhaps the user should be allowed to change the refresh rate interval=1000, n_intervals=0, disabled=False, ), ] ) @app.callback( Output("graph", "figure"), Input("interval", "n_intervals"), Input("graph", "figure"), Input("url", "pathname"), ) def get_graph(n, current_figure, url): method, folder, routine, qubit, format = url.split(os.sep)[2:] try: # data = DataUnits.load_data(folder, routine, format, "precision_sweep") # with open(f"{folder}/platform.yml", "r") as f: # nqubits = yaml.safe_load(f)["nqubits"] # if len(data) > 2: # params, fit = resonator_spectroscopy_fit(folder, format, nqubits) # else: # params, fit = None, None # return getattr(plots.resonator_spectroscopy, method)(data, params, fit) # # FIXME: Temporarily hardcode the plotting method to test # # multiple routines with different names in one folder # # should be changed to: # # return getattr(getattr(plots, routine), method)(data) return getattr(plots, method)(folder, routine, qubit, format) except (FileNotFoundError, pd.errors.EmptyDataError): return current_figure src/qibocal/web/__init__.py METASEP src/qibocal/tests/test_data.py METASEP # -*- coding: utf-8 -*- """Some tests for the Data and DataUnits class""" import numpy as np import pytest from pint import DimensionalityError, UndefinedUnitError from qibocal.data import Data, DataUnits def random_data_units(length, options=None): data = DataUnits(options=options) for l in range(length): msr, i, q, phase = np.random.rand(4) pulse_sequence_result = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, } add_options = {} if options is not None: for option in options: add_options[option] = str(l) data.add({**pulse_sequence_result, **add_options}) return data def random_data(length): data = Data() for i in range(length): data.add({"int": int(i), "float": float(i), "string": str(i), "bool": bool(i)}) return data def test_data_initialization(): """Test DataUnits constructor""" data = DataUnits() assert len(data.df.columns) == 4 assert list(data.df.columns) == [ # pylint: disable=E1101 "MSR", "i", "q", "phase", ] data1 = DataUnits(quantities={"attenuation": "dB"}) assert len(data1.df.columns) == 5 assert list(data1.df.columns) == [ # pylint: disable=E1101 "attenuation", "MSR", "i", "q", "phase", ] data2 = DataUnits(quantities={"attenuation": "dB"}, options=["option1"]) assert len(data2.df.columns) == 6 assert list(data2.df.columns) == [ # pylint: disable=E1101 "option1", "attenuation", "MSR", "i", "q", "phase", ] def test_data_units_units(): """Test units of measure in DataUnits""" data_units = DataUnits() assert data_units.df.MSR.values.units == "volt" data_units1 = DataUnits(quantities={"frequency": "Hz"}) assert data_units1.df.frequency.values.units == "hertz" with pytest.raises(UndefinedUnitError): data_units2 = DataUnits(quantities={"fake_unit": "fake"}) def test_data_units_add(): """Test add method of DataUnits""" data_units = random_data_units(5) assert len(data_units) == 5 
data_units1 = DataUnits(quantities={"attenuation": "dB"}) msr, i, q, phase, att = np.random.rand(len(data_units1.df.columns)) data_units1.add( { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "attenuation[dB]": att, } ) assert len(data_units1) == 1 data_units1.add( { "MSR[V]": 0, "i[V]": 0.0, "q[V]": 0.0, "phase[deg]": 0, "attenuation[dB]": 1, } ) assert len(data_units1) == 2 data_units2 = DataUnits() msr, i, q, phase = np.random.rand(len(data_units2.df.columns)) with pytest.raises(DimensionalityError): data_units2.add({"MSR[dB]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) with pytest.raises(UndefinedUnitError): data_units2.add({"MSR[test]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) data_units3 = random_data_units(10, options=["test"]) assert len(data_units3) == 10 def test_data_add(): """Test add method of Data class""" data = random_data(5) assert len(data) == 5 data.add({"int": 123, "float": 123.456, "string": "123", "bool": True}) assert len(data) == 6 def test_data_units_load_data_from_dict(): """Test set method of DataUnits class""" data_units = DataUnits() test = { "MSR[V]": [1, 2, 3], "i[V]": [3.0, 4.0, 5.0], "q[V]": np.array([3, 4, 5]), "phase[deg]": [6.0, 7.0, 8.0], } data_units.load_data_from_dict(test) assert len(data_units) == 3 assert (data_units.get_values("MSR", "V") == [1, 2, 3]).all() assert (data_units.get_values("i", "V") == [3.0, 4.0, 5.0]).all() assert (data_units.get_values("q", "V") == [3, 4, 5]).all() assert (data_units.get_values("phase", "deg") == [6.0, 7.0, 8.0]).all() data_units1 = DataUnits(options=["option1", "option2"]) test = {"option1": ["one", "two", "three"], "option2": [1, 2, 3]} data_units1.load_data_from_dict(test) assert len(data_units1) == 3 assert (data_units1.get_values("option1") == ["one", "two", "three"]).all() assert (data_units1.get_values("option2") == [1, 2, 3]).all() def test_data_load_data_from_dict(): """Test set method of Data class""" data = random_data(5) test = { "int": [1, 2, 3], "float": [3.0, 4.0, 5.0], "string": ["one", "two", "three"], "bool": [True, False, True], } data.load_data_from_dict(test) assert len(data) == 3 assert (data.get_values("int") == [1, 2, 3]).all() assert (data.get_values("float") == [3.0, 4.0, 5.0]).all() assert (data.get_values("string") == ["one", "two", "three"]).all() assert (data.get_values("bool") == [True, False, True]).all() def test_get_values_data_units(): """Test get_values method of DataUnits class""" data_units = random_data_units(5, options=["option"]) assert (data_units.get_values("option") == data_units.df["option"]).all() assert ( data_units.get_values("MSR", "uV") == data_units.df["MSR"].pint.to("uV").pint.magnitude ).all() def test_get_values_data(): """Test get_values method of Data class""" data = random_data(5) assert (data.get_values("int") == data.df["int"]).all() src/qibocal/fitting/utils.py METASEP # -*- coding: utf-8 -*- import re import numpy as np def lorenzian(frequency, amplitude, center, sigma, offset): # http://openafox.com/science/peak-function-derivations.html return (amplitude / np.pi) * ( sigma / ((frequency - center) ** 2 + sigma**2) ) + offset def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq 
: p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def exp(x, *p): return p[0] - p[1] * np.exp(-1 * x * p[2]) def flipping(x, p0, p1, p2, p3): # A fit to Flipping Qubit oscillation # Epsilon?? shoule be Amplitude : p[0] # Offset : p[1] # Period of oscillation : p[2] # phase for the first point corresponding to pi/2 rotation : p[3] return np.sin(x * 2 * np.pi / p2 + p3) * p0 + p1 def cos(x, p0, p1, p2, p3): # Offset : p[0] # Amplitude : p[1] # Period : p[2] # Phase : p[3] return p0 + p1 * np.cos(2 * np.pi * x / p2 + p3) def parse(key): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) return name, unit src/qibocal/fitting/methods.py METASEP # -*- coding: utf-8 -*- """Routine-specific method for post-processing data acquired.""" import lmfit import numpy as np from scipy.optimize import curve_fit from qibocal.config import log from qibocal.data import Data from qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey def lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None): """Fitting routine for resonator spectroscopy""" if fit_file_name == None: data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[1], labels[0], ], ) else: data_fit = Data( name=fit_file_name + f"_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[1], labels[0], ], ) frequencies = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) # Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(lorenzian) # Guess parameters for Lorentzian max or min if (nqubits == 1 and labels[0] == "resonator_freq") or ( nqubits != 1 and labels[0] == "qubit_freq" ): guess_center = frequencies[ np.argmax(voltages) ] # Argmax = Returns the indices of the maximum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center) guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi else: guess_center = frequencies[ np.argmin(voltages) ] # Argmin = Returns the indices of the minimum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center) guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi # Add guessed parameters to the model model_Q.set_param_hint("center", value=guess_center, vary=True) model_Q.set_param_hint("sigma", value=guess_sigma, vary=True) model_Q.set_param_hint("amplitude", value=guess_amp, vary=True) model_Q.set_param_hint("offset", value=guess_offset, vary=True) guess_parameters = model_Q.make_params() # fit the model with the data and guessed parameters try: fit_res = model_Q.fit( data=voltages, frequency=frequencies, params=guess_parameters ) except: log.warning("The fitting was not successful") return data_fit # get the values for postprocessing and for legend. 
f0 = fit_res.best_values["center"] BW = fit_res.best_values["sigma"] * 2 Q = abs(f0 / BW) peak_voltage = ( fit_res.best_values["amplitude"] / (fit_res.best_values["sigma"] * np.pi) + fit_res.best_values["offset"] ) freq = f0 * 1e9 data_fit.add( { labels[1]: peak_voltage, labels[0]: freq, "popt0": fit_res.best_values["amplitude"], "popt1": fit_res.best_values["center"], "popt2": fit_res.best_values["sigma"], "popt3": fit_res.best_values["offset"], } ) return data_fit def rabi_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 0.1e-6, ] else: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmax(voltages.values)], np.pi / 2, 0.1e-6, ] try: popt, pcov = curve_fit( rabi, time.values, voltages.values, p0=pguess, maxfev=10000 ) smooth_dataset = rabi(time.values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) pi_pulse_max_voltage = smooth_dataset.max() t2 = 1.0 / popt[4] # double check T1 except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: pi_pulse_duration, labels[1]: pi_pulse_max_voltage, } ) return data_fit def ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 500e-9, ] try: popt, pcov = curve_fit( ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000 ) delta_fitting = popt[2] delta_phys = int((delta_fitting * sampling_rate) - offset_freq) corrected_qubit_frequency = int(qubit_freq + delta_phys) t2 = 1.0 / popt[4] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: delta_phys, labels[1]: corrected_qubit_frequency, labels[2]: t2, } ) return data_fit def t1_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", labels[0], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ max(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] else: pguess = [ min(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] try: popt, pcov = curve_fit( exp, time.values, voltages.values, p0=pguess, maxfev=2000000 ) t1 = abs(1 / popt[2]) except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], labels[0]: t1, } ) return data_fit def flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], labels[1], ], ) flips = data.get_values(*parse(x)) # Check X data stores. N flips or i? 
voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter else: pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter try: popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000) epsilon = -np.pi / popt[2] amplitude_delta = np.pi / (np.pi + epsilon) corrected_amplitude = amplitude_delta * pi_pulse_amplitude # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter) # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: amplitude_delta, labels[1]: corrected_amplitude, } ) return data_fit def drag_tunning_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], ], ) beta_params = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ 0, # Offset: p[0] beta_params.values[np.argmax(voltages)] - beta_params.values[np.argmin(voltages)], # Amplitude: p[1] 4, # Period: p[2] 0.3, # Phase: p[3] ] try: popt, pcov = curve_fit(cos, beta_params.values, voltages.values) smooth_dataset = cos(beta_params.values, popt[0], popt[1], popt[2], popt[3]) beta_optimal = beta_params.values[np.argmin(smooth_dataset)] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: beta_optimal, } ) return data_fit src/qibocal/fitting/__init__.py METASEP src/qibocal/cli/builders.py METASEP # -*- coding: utf-8 -*- import datetime import inspect import os import shutil import yaml from qibocal import calibrations from qibocal.config import log, raise_error from qibocal.data import Data def load_yaml(path): """Load yaml file from disk.""" with open(path) as file: data = yaml.safe_load(file) return data class ActionBuilder: """Class for parsing and executing runcards. Args: runcard (path): path containing the runcard. folder (path): path for the output folder. force (bool): option to overwrite the output folder if it exists already. """ def __init__(self, runcard, folder=None, force=False): path, self.folder = self._generate_output_folder(folder, force) self.runcard = load_yaml(runcard) # Qibolab default backend if not provided in runcard. backend_name = self.runcard.get("backend", "qibolab") platform_name = self.runcard.get("platform", "dummy") self.backend, self.platform = self._allocate_backend( backend_name, platform_name, path ) self.qubits = self.runcard["qubits"] self.format = self.runcard["format"] # Saving runcard shutil.copy(runcard, f"{path}/runcard.yml") self.save_meta(path, self.folder) @staticmethod def _generate_output_folder(folder, force): """Static method for generating the output folder. Args: folder (path): path for the output folder. If None it will be created a folder automatically force (bool): option to overwrite the output folder if it exists already. 
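
        Returns:
            (path, folder): absolute path of the output directory and the folder name.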
""" if folder is None: import getpass e = datetime.datetime.now() user = getpass.getuser().replace(".", "-") date = e.strftime("%Y-%m-%d") folder = f"{date}-{'000'}-{user}" num = 0 while os.path.exists(folder): log.info(f"Directory {folder} already exists.") num += 1 folder = f"{date}-{str(num).rjust(3, '0')}-{user}" log.info(f"Trying to create directory {folder}") elif os.path.exists(folder) and not force: raise_error(RuntimeError, f"Directory {folder} already exists.") elif os.path.exists(folder) and force: log.warning(f"Deleting previous directory {folder}.") shutil.rmtree(os.path.join(os.getcwd(), folder)) path = os.path.join(os.getcwd(), folder) log.info(f"Creating directory {folder}.") os.makedirs(path) return path, folder def _allocate_backend(self, backend_name, platform_name, path): """Allocate the platform using Qibolab.""" from qibo.backends import GlobalBackend, set_backend if backend_name == "qibolab": from qibolab.paths import qibolab_folder original_runcard = qibolab_folder / "runcards" / f"{platform_name}.yml" # copy of the original runcard that will stay unmodified shutil.copy(original_runcard, f"{path}/platform.yml") # copy of the original runcard that will be modified during calibration updated_runcard = f"{self.folder}/new_platform.yml" shutil.copy(original_runcard, updated_runcard) # allocate backend with updated_runcard set_backend( backend=backend_name, platform=platform_name, runcard=updated_runcard ) backend = GlobalBackend() return backend, backend.platform else: set_backend(backend=backend_name, platform=platform_name) backend = GlobalBackend() return backend, None def save_meta(self, path, folder): import qibocal e = datetime.datetime.now(datetime.timezone.utc) meta = {} meta["title"] = folder meta["backend"] = str(self.backend) meta["platform"] = str(self.backend.platform) meta["date"] = e.strftime("%Y-%m-%d") meta["start-time"] = e.strftime("%H:%M:%S") meta["end-time"] = e.strftime("%H:%M:%S") meta["versions"] = self.backend.versions # pylint: disable=E1101 meta["versions"]["qibocal"] = qibocal.__version__ with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) def _build_single_action(self, name): """Helper method to parse the actions in the runcard.""" f = getattr(calibrations, name) path = os.path.join(self.folder, f"data/{name}/") os.makedirs(path) sig = inspect.signature(f) params = self.runcard["actions"][name] for param in list(sig.parameters)[2:-1]: if param not in params: raise_error(AttributeError, f"Missing parameter {param} in runcard.") if f.__annotations__["qubit"] == int: single_qubit_action = True else: single_qubit_action = False return f, params, path, single_qubit_action def execute(self): """Method to execute sequentially all the actions in the runcard.""" if self.platform is not None: self.platform.connect() self.platform.setup() self.platform.start() for action in self.runcard["actions"]: routine, args, path, single_qubit_action = self._build_single_action(action) self._execute_single_action(routine, args, path, single_qubit_action) if self.platform is not None: self.platform.stop() self.platform.disconnect() def _execute_single_action(self, routine, arguments, path, single_qubit): """Method to execute a single action and retrieving the results.""" if self.format is None: raise_error(ValueError, f"Cannot store data using {self.format} format.") if single_qubit: for qubit in self.qubits: results = routine(self.platform, qubit, **arguments) for data in results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: 
self.update_platform_runcard(qubit, routine.__name__) else: results = routine(self.platform, self.qubits, **arguments) for data in results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: self.update_platform_runcard(qubit, routine.__name__) def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data( self.folder, routine, self.format, f"fit_q{qubit}" ) except: data_fit = Data() params = [i for i in list(data_fit.df.keys()) if "popt" not in i] settings = load_yaml(f"{self.folder}/new_platform.yml") for param in params: settings["characterization"]["single_qubit"][qubit][param] = int( data_fit.get_values(param) ) with open(f"{self.folder}/new_platform.yml", "w") as file: yaml.dump( settings, file, sort_keys=False, indent=4, default_flow_style=None ) def dump_report(self): from qibocal.web.report import create_report # update end time meta = load_yaml(f"{self.folder}/meta.yml") e = datetime.datetime.now(datetime.timezone.utc) meta["end-time"] = e.strftime("%H:%M:%S") with open(f"{self.folder}/meta.yml", "w") as file: yaml.dump(meta, file) create_report(self.folder) class ReportBuilder: """Parses routines and plots to report and live plotting page. Args: path (str): Path to the data folder to generate report for. """ def __init__(self, path): self.path = path self.metadata = load_yaml(os.path.join(path, "meta.yml")) # find proper path title base, self.title = os.path.join(os.getcwd(), path), "" while self.title in ("", "."): base, self.title = os.path.split(base) self.runcard = load_yaml(os.path.join(path, "runcard.yml")) self.format = self.runcard.get("format") self.qubits = self.runcard.get("qubits") # create calibration routine objects # (could be incorporated to :meth:`qibocal.cli.builders.ActionBuilder._build_single_action`) self.routines = [] for action in self.runcard.get("actions"): if hasattr(calibrations, action): routine = getattr(calibrations, action) else: raise_error(ValueError, f"Undefined action {action} in report.") if not hasattr(routine, "plots"): routine.plots = [] self.routines.append(routine) def get_routine_name(self, routine): """Prettify routine's name for report headers.""" return routine.__name__.replace("_", " ").title() def get_figure(self, routine, method, qubit): """Get html figure for report. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ import tempfile figure = method(self.path, routine.__name__, qubit, self.format) with tempfile.NamedTemporaryFile() as temp: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") return fightml def get_live_figure(self, routine, method, qubit): """Get url to dash page for live plotting. This url is used by :meth:`qibocal.web.app.get_graph`. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. 
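
        Returns:
            str: relative URL path combining the plot method name, the output
            folder, the routine name, the qubit id and the data format.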
""" return os.path.join( method.__name__, self.path, routine.__name__, str(qubit), self.format, ) src/qibocal/cli/_base.py METASEP # -*- coding: utf-8 -*- """Adds global CLI options.""" import base64 import pathlib import shutil import socket import subprocess import uuid from urllib.parse import urljoin import click from qibo.config import log, raise_error from qibocal.cli.builders import ActionBuilder CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # options for report upload UPLOAD_HOST = ( "qibocal@localhost" if socket.gethostname() == "saadiyat" else "[email protected]" ) TARGET_DIR = "qibocal-reports/" ROOT_URL = "http://login.qrccluster.com:9000/" @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("runcard", metavar="RUNCARD", type=click.Path(exists=True)) @click.option( "folder", "-o", type=click.Path(), help="Output folder. If not provided a standard name will generated.", ) @click.option( "force", "-f", is_flag=True, help="Use --force option to overwrite the output folder.", ) def command(runcard, folder, force=None): """qibocal: Quantum Calibration Verification and Validation using Qibo. Arguments: - RUNCARD: runcard with declarative inputs. """ builder = ActionBuilder(runcard, folder, force) builder.execute() builder.dump_report() @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "port", "-p", "--port", default=8050, type=int, help="Localhost port to launch dash server.", ) @click.option( "debug", "-d", "--debug", is_flag=True, help="Launch server in debugging mode.", ) def live_plot(port, debug): """Real time plotting of calibration data on a dash server.""" import socket from qibocal.web.app import app # change port if it is already used while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: if s.connect_ex(("localhost", port)) != 0: break port += 1 app.run_server(debug=debug, port=port) @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("output_folder", metavar="FOLDER", type=click.Path(exists=True)) def upload(output_folder): """Uploads output folder to server""" output_path = pathlib.Path(output_folder) # check the rsync command exists. if not shutil.which("rsync"): raise_error( RuntimeError, "Could not find the rsync command. Please make sure it is installed.", ) # check that we can authentica with a certificate ssh_command_line = ( "ssh", "-o", "PreferredAuthentications=publickey", "-q", UPLOAD_HOST, "exit", ) str_line = " ".join(repr(ele) for ele in ssh_command_line) log.info(f"Checking SSH connection to {UPLOAD_HOST}.") try: subprocess.run(ssh_command_line, check=True) except subprocess.CalledProcessError as e: raise RuntimeError( ( "Could not validate the SSH key. " "The command\n%s\nreturned a non zero exit status. " "Please make sure that your public SSH key is on the server." ) % str_line ) from e except OSError as e: raise RuntimeError( "Could not run the command\n{}\n: {}".format(str_line, e) ) from e log.info("Connection seems OK.") # upload output randname = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode() newdir = TARGET_DIR + randname rsync_command = ( "rsync", "-aLz", "--chmod=ug=rwx,o=rx", f"{output_path}/", f"{UPLOAD_HOST}:{newdir}", ) log.info(f"Uploading output ({output_path}) to {UPLOAD_HOST}") try: subprocess.run(rsync_command, check=True) except subprocess.CalledProcessError as e: msg = f"Failed to upload output: {e}" raise RuntimeError(msg) from e url = urljoin(ROOT_URL, randname) log.info(f"Upload completed. 
The result is available at:\n{url}") src/qibocal/cli/__init__.py METASEP # -*- coding: utf-8 -*- """CLI entry point.""" from ._base import command, live_plot, upload src/qibocal/calibrations/__init__.py METASEP # -*- coding: utf-8 -*- from qibocal.calibrations.characterization.allXY import * from qibocal.calibrations.characterization.calibrate_qubit_states import * from qibocal.calibrations.characterization.flipping import * from qibocal.calibrations.characterization.qubit_spectroscopy import * from qibocal.calibrations.characterization.rabi_oscillations import * from qibocal.calibrations.characterization.ramsey import * from qibocal.calibrations.characterization.resonator_spectroscopy import * from qibocal.calibrations.characterization.t1 import * from qibocal.calibrations.protocols.test import * src/qibocal/decorators.py METASEP # -*- coding: utf-8 -*- """Decorators implementation.""" import os from qibocal.config import raise_error def plot(header, method): """Decorator for adding plots in the report and live plotting page. Args: header (str): Header of the plot to use in the report. method (Callable): Plotting method defined under ``qibocal.plots``. """ def wrapped(f): if hasattr(f, "plots"): # insert in the beginning of the list to have # proper plot ordering in the report f.plots.insert(0, (header, method)) else: f.plots = [(header, method)] return f return wrapped src/qibocal/data.py METASEP # -*- coding: utf-8 -*- """Implementation of DataUnits and Data class to store calibration routines outputs.""" import re from abc import abstractmethod import numpy as np import pandas as pd import pint_pandas from qibocal.config import raise_error class AbstractData: def __init__(self, name=None): if name is None: self.name = "data" else: self.name = name self.df = pd.DataFrame() self.quantities = None def __add__(self, data): self.df = pd.concat([self.df, data.df], ignore_index=True) return self @abstractmethod def add(self, data): raise_error(NotImplementedError) def __len__(self): """Computes the length of the data.""" return len(self.df) @classmethod def load_data(cls, folder, routine, format, name): raise_error(NotImplementedError) @abstractmethod def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" if self.quantities == None: self.df.to_csv(f"{path}/{self.name}.csv") else: self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") class DataUnits(AbstractData): """Class to store the data measured during the calibration routines. It is a wrapper to a pandas DataFrame with units of measure from the Pint library. Args: quantities (dict): dictionary containing additional quantities that the user may save other than the pulse sequence output. The keys are the name of the quantities and the corresponding values are the units of measure. options (list): list containing additional values to be saved. 
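
    Example (illustrative):
        A calibration routine typically declares its extra quantities and then
        appends one row per measured point using ``<name>[<unit>]`` keys::

            data = DataUnits(quantities={"frequency": "Hz"})
            data.add(
                {
                    "MSR[V]": 1e-3,
                    "i[V]": 0.0,
                    "q[V]": 1e-3,
                    "phase[deg]": 0.0,
                    "frequency[Hz]": 5.0e9,
                }
            )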
""" def __init__(self, name=None, quantities=None, options=None): super().__init__(name=name) self._df = pd.DataFrame( { "MSR": pd.Series(dtype="pint[V]"), "i": pd.Series(dtype="pint[V]"), "q": pd.Series(dtype="pint[V]"), "phase": pd.Series(dtype="pint[deg]"), } ) self.quantities = {"MSR": "V", "i": "V", "q": "V", "phase": "rad"} self.options = [] if quantities is not None: self.quantities.update(quantities) for name, unit in quantities.items(): self.df.insert(0, name, pd.Series(dtype=f"pint[{unit}]")) if options is not None: self.options = options for option in options: self.df.insert( # pylint: disable=E1101 0, option, pd.Series(dtype=object) ) from pint import UnitRegistry self.ureg = UnitRegistry() @property def df(self): return self._df @df.setter def df(self, df): """Set df attribute. Args: df (pd.DataFrame): pandas DataFrame. Every key should have the following form: ``<name>[<unit>]``. """ if isinstance(df, pd.DataFrame): self._df = df else: raise_error(TypeError, f"{df.type} is not a pd.DataFrame.") def load_data_from_dict(self, data: dict): """Set df attribute. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ processed_data = {} for key, values in data.items(): if "[" in key: name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) processed_data[name] = pd.Series( data=(np.array(values) * self.ureg(unit)), dtype=f"pint[{unit}]" ) else: processed_data[key] = pd.Series(data=(values), dtype=object) self._df = pd.DataFrame(processed_data) def add(self, data): """Add a row to `DataUnits`. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): if "[" in key: name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) # TODO: find a better way to do this self.df.loc[l, name] = np.array(value) * self.ureg(unit) else: self.df.loc[l, key] = value def get_values(self, key, unit=None): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. unit (str): Unit of the returned values. Returns: ``pd.Series`` with the quantity values in the given units. """ if unit is None: return self.df[key] else: return self.df[key].pint.to(unit).pint.magnitude @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: data (``DataUnits``): dataset object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file, header=[0, 1]) obj.df.pop("Unnamed: 0_level_0") quantities_label = [] obj.options = [] for column in obj.df.columns: # pylint: disable=E1101 if "Unnamed" not in column[1]: quantities_label.append(column[0]) else: obj.options.append(column[0]) quantities_df = obj.df[quantities_label].pint.quantify() options_df = obj.df[obj.options] options_df.columns = options_df.columns.droplevel(1) obj.df = pd.concat([quantities_df, options_df], axis=1) elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. 
Args: path (str): Path containing output folder.""" data = self.df[list(self.quantities)].pint.dequantify() firsts = data.index.get_level_values(None) data[self.options] = self.df[self.options].loc[firsts].values data.to_csv(f"{path}/{self.name}.csv") class Data(AbstractData): """Class to store the data obtained from calibration routines. It is a wrapper to a pandas DataFrame. Args: quantities (dict): dictionary quantities to be saved. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) if quantities is not None: self.quantities = quantities for name in quantities: self.df.insert(0, name, pd.Series(dtype=object)) @property def df(self): return self._df @df.setter def df(self, data): """Set df attribute. Args: df (pd.DataFrame): """ if isinstance(data, pd.DataFrame): self._df = data def load_data_from_dict(self, data: dict): """Set df attribute. Args: df (dict): dictionary containing the data to be added. """ processed_data = {} for key, values in data.items(): processed_data[key] = pd.Series(data=(values), dtype=object) self._df = pd.DataFrame(processed_data) def add(self, data): """Add a row to data. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): self.df.loc[l, key] = value def get_values(self, quantity): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity].values @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: data (``Data``): data object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file) obj.df.pop("Unnamed: 0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") src/qibocal/config.py METASEP # -*- coding: utf-8 -*- """Custom logger implemenation.""" import logging import os # Logging levels available here https://docs.python.org/3/library/logging.html#logging-levels QIBOCAL_LOG_LEVEL = 30 if "QIBOCAL_LOG_LEVEL" in os.environ: # pragma: no cover QIBOCAL_LOG_LEVEL = 10 * int(os.environ.get("QIBOCAL_LOG_LEVEL")) def raise_error(exception, message=None, args=None): """Raise exception with logging error. Args: exception (Exception): python exception. message (str): the error message. 
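        args: additional argument passed to the exception together with the message, if given.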
""" log.error(message) if args: raise exception(message, args) else: raise exception(message) # Configuration for logging mechanism class CustomHandler(logging.StreamHandler): """Custom handler for logging algorithm.""" def format(self, record): """Format the record with specific format.""" from qibocal import __version__ fmt = f"[Qibocal {__version__}|%(levelname)s|%(asctime)s]: %(message)s" grey = "\x1b[38;20m" green = "\x1b[92m" yellow = "\x1b[33;20m" red = "\x1b[31;20m" bold_red = "\x1b[31;1m" reset = "\x1b[0m" self.FORMATS = { logging.DEBUG: green + fmt + reset, logging.INFO: grey + fmt + reset, logging.WARNING: yellow + fmt + reset, logging.ERROR: red + fmt + reset, logging.CRITICAL: bold_red + fmt + reset, } log_fmt = self.FORMATS.get(record.levelno) return logging.Formatter(log_fmt, datefmt="%Y-%m-%d %H:%M:%S").format(record) # allocate logger object log = logging.getLogger(__name__) log.setLevel(QIBOCAL_LOG_LEVEL) log.addHandler(CustomHandler()) src/qibocal/__init__.py METASEP # -*- coding: utf-8 -*- from .cli import command, live_plot, upload """qibocal: Quantum Calibration Verification and Validation using Qibo.""" import importlib.metadata as im __version__ = im.version(__package__) src/qibocal/plots/__init__.py METASEP # -*- coding: utf-8 -*- from qibocal.plots.heatmaps import * from qibocal.plots.scatters import * src/qibocal/plots/spectroscopies.py METASEP
[ { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n 
xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n 
\"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n 
go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n 
fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n 
for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", 
quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n 
vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", 
\"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n 
)\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 
1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:", "type": "inproject" }, { "content": "# 
-*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n 
go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n 
name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n 
fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n 
except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(\n min(data_shifted.get_values(\"frequency\", \"GHz\")),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n 
fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(\n min(data_shifted.get_values(\"frequency\", \"GHz\")),\n max(data_shifted.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_shifted),\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef 
frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n 
y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is 
hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n 
x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n 
fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n 
showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", 
\"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", 
\"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n 
showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n 
col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n 
fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", 
quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n 
name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef 
frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, 
f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = 
DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n 
go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n 
xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, 
formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(\n min(data_shifted.get_values(\"frequency\", \"GHz\")),\n max(data_shifted.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_shifted),\n )\n params = [i for i in list(data_fit_shifted.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit_shifted.get_values(\"popt0\"),\n data_fit_shifted.get_values(\"popt1\"),\n data_fit_shifted.get_values(\"popt2\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, 
# -*- coding: utf-8 -*-
import os  # required by frequency_flux_msr_phase__matrix (os.path.exists)

import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots

from qibocal.data import Data, DataUnits
from qibocal.fitting.utils import lorenzian


def frequency_msr_phase__fast_precision(folder, routine, qubit, format):
    """Plot MSR and phase vs frequency for the fast and precision sweeps, with the Lorentzian fit when available."""
    try:
        data_fast = DataUnits.load_data(folder, routine, format, f"fast_sweep_q{qubit}")
    except:
        data_fast = DataUnits(quantities={"frequency": "Hz"})
    try:
        data_precision = DataUnits.load_data(
            folder, routine, format, f"precision_sweep_q{qubit}"
        )
    except:
        data_precision = DataUnits(quantities={"frequency": "Hz"})
    try:
        data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}")
    except:
        data_fit = Data(
            quantities=[
                "popt0",
                "popt1",
                "popt2",
                "popt3",
                "label1",
                "label2",
            ]
        )

    fig = make_subplots(
        rows=1,
        cols=2,
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
        subplot_titles=(
            "MSR (V)",
            "phase (rad)",
        ),
    )

    fig.add_trace(
        go.Scatter(
            x=data_fast.get_values("frequency", "GHz"),
            y=data_fast.get_values("MSR", "uV"),
            name="Fast",
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Scatter(
            x=data_fast.get_values("frequency", "GHz"),
            y=data_fast.get_values("phase", "rad"),
            name="Fast",
        ),
        row=1,
        col=2,
    )
    fig.add_trace(
        go.Scatter(
            x=data_precision.get_values("frequency", "GHz"),
            y=data_precision.get_values("MSR", "uV"),
            name="Precision",
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Scatter(
            x=data_precision.get_values("frequency", "GHz"),
            y=data_precision.get_values("phase", "rad"),
            name="Precision",
        ),
        row=1,
        col=2,
    )
    if len(data_fast) > 0 and len(data_fit) > 0:
        freqrange = np.linspace(
            min(data_fast.get_values("frequency", "GHz")),
            max(data_fast.get_values("frequency", "GHz")),
            2 * len(data_fast),
        )
        params = [i for i in list(data_fit.df.keys()) if "popt" not in i]
        fig.add_trace(
            go.Scatter(
                x=freqrange,
                y=lorenzian(
                    freqrange,
                    data_fit.get_values("popt0"),
                    data_fit.get_values("popt1"),
                    data_fit.get_values("popt2"),
                    data_fit.get_values("popt3"),
                ),
                name="Fit",
                line=go.scatter.Line(dash="dot"),
            ),
            row=1,
            col=1,
        )
        fig.add_annotation(
            dict(
                font=dict(color="black", size=12),
                x=0,
                y=-0.25,
                showarrow=False,
                text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.",
                textangle=0,
                xanchor="left",
                xref="paper",
                yref="paper",
            )
        )
        fig.add_annotation(
            dict(
                font=dict(color="black", size=12),
                x=0,
                y=-0.30,
                showarrow=False,
                text=f"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.",
                textangle=0,
                xanchor="left",
                xref="paper",
                yref="paper",
            )
        )
    fig.update_layout(
        showlegend=True,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
        xaxis_title="Frequency (GHz)",
        yaxis_title="MSR (uV)",
        xaxis2_title="Frequency (GHz)",
        yaxis2_title="Phase (rad)",
    )
    return fig


def frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):
    """Plot a 1D cut of MSR vs frequency at a fixed attenuation value."""
    data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}")
    plot1d_attenuation = 30  # attenuation value to use for 1D frequency vs MSR plot

    fig = go.Figure()
    # index data on a specific attenuation value
    smalldf = data.df[data.get_values("attenuation", "dB") == plot1d_attenuation].copy()
    # split multiple software averages to different datasets
    datasets = []
    while len(smalldf):
        datasets.append(smalldf.drop_duplicates("frequency"))
        smalldf.drop(datasets[-1].index, inplace=True)
        fig.add_trace(
            go.Scatter(
                x=datasets[-1]["frequency"].pint.to("GHz").pint.magnitude,
                y=datasets[-1]["MSR"].pint.to("V").pint.magnitude,
            ),
        )

    fig.update_layout(
        showlegend=False,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
        xaxis_title="Frequency (GHz)",
        yaxis_title="MSR (V)",
    )
    return fig


def frequency_flux_msr_phase(folder, routine, qubit, format):
    """Plot MSR and phase heatmaps vs frequency and bias current."""
    data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}")
    fig = make_subplots(
        rows=1,
        cols=2,
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
        subplot_titles=(
            "MSR (V)",
            "phase (rad)",
        ),
    )

    fig.add_trace(
        go.Heatmap(
            x=data.get_values("frequency", "GHz"),
            y=data.get_values("current", "A"),
            z=data.get_values("MSR", "V"),
            colorbar_x=0.45,
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Heatmap(
            x=data.get_values("frequency", "GHz"),
            y=data.get_values("current", "A"),
            z=data.get_values("phase", "rad"),
            colorbar_x=1.0,
        ),
        row=1,
        col=2,
    )
    fig.update_layout(
        showlegend=False,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
        xaxis_title="Frequency (GHz)",
        yaxis_title="Current (A)",
        xaxis2_title="Frequency (GHz)",
        yaxis2_title="Current (A)",
    )
    return fig


def frequency_flux_msr_phase__matrix(folder, routine, qubit, format):
    """Plot a grid of MSR and phase heatmaps, one column per flux-line data file."""
    fluxes = []
    for i in range(25):  # FIXME: 25 is hardcoded
        file = f"{folder}/data/{routine}/data_q{qubit}_f{i}.csv"
        if os.path.exists(file):
            fluxes += [i]

    if len(fluxes) < 1:
        nb = 1
    else:
        nb = len(fluxes)
    fig = make_subplots(
        rows=2,
        cols=nb,
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
        x_title="Frequency (Hz)",
        y_title="Current (A)",
        shared_xaxes=True,
        shared_yaxes=True,
    )

    for j in fluxes:
        if j == fluxes[-1]:
            showscale = True
        else:
            showscale = False
        data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}_f{j}")
        fig.add_trace(
            go.Heatmap(
                x=data.get_values("frequency", "GHz"),
                y=data.get_values("current", "A"),
                z=data.get_values("MSR", "V"),
                showscale=showscale,
            ),
            row=1,
            col=j,
        )
        fig.add_trace(
            go.Heatmap(
                x=data.get_values("frequency", "GHz"),
                y=data.get_values("current", "A"),
                z=data.get_values("phase", "rad"),
                showscale=showscale,
            ),
            row=2,
            col=j,
        )
    fig.update_layout(
        showlegend=False,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
    )
    return fig


def frequency_attenuation_msr_phase(folder, routine, qubit, format):
    """Plot MSR and phase heatmaps vs frequency and attenuation."""
    data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}")
    fig = make_subplots(
        rows=1,
        cols=2,
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
        subplot_titles=(
            "MSR (V)",
            "phase (rad)",
        ),
    )

    fig.add_trace(
        go.Heatmap(
            x=data.get_values("frequency", "GHz"),
            y=data.get_values("attenuation", "dB"),
            z=data.get_values("MSR", "V"),
            colorbar_x=0.45,
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Heatmap(
            x=data.get_values("frequency", "GHz"),
            y=data.get_values("attenuation", "dB"),
            z=data.get_values("phase", "rad"),
            colorbar_x=1.0,
        ),
        row=1,
        col=2,
    )
    fig.update_layout(
        showlegend=False,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
        xaxis_title="Frequency (GHz)",
        yaxis_title="Attenuation (dB)",
        xaxis2_title="Frequency (GHz)",
        yaxis2_title="Attenuation (dB)",
    )
    return fig


def dispersive_frequency_msr_phase(folder, routine, qubit, formato):
    """Overlay the spectroscopy and shifted-spectroscopy sweeps with their Lorentzian fits."""
    try:
        data_spec = DataUnits.load_data(folder, routine, formato, f"data_q{qubit}")
    except:
        data_spec = DataUnits(name=f"data_q{qubit}", quantities={"frequency": "Hz"})

    try:
        data_shifted = DataUnits.load_data(
            folder, routine, formato, f"data_shifted_q{qubit}"
        )
    except:
        data_shifted = DataUnits(
            name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"}
        )

    try:
        data_fit = Data.load_data(folder, routine, formato, f"fit_q{qubit}")
    except:
        data_fit = Data(
            quantities=[
                "popt0",
                "popt1",
                "popt2",
                "popt3",
                "label1",
                "label2",
            ]
        )

    try:
        data_fit_shifted = Data.load_data(
            folder, routine, formato, f"fit_shifted_q{qubit}"
        )
    except:
        data_fit_shifted = Data(
            quantities=[
                "popt0",
                "popt1",
                "popt2",
                "popt3",
                "label1",
                "label2",
            ]
        )

    fig = make_subplots(
        rows=1,
        cols=2,
        horizontal_spacing=0.1,
        vertical_spacing=0.1,
        subplot_titles=(
            "MSR (V)",
            "phase (rad)",
        ),
    )

    fig.add_trace(
        go.Scatter(
            x=data_spec.get_values("frequency", "GHz"),
            y=data_spec.get_values("MSR", "uV"),
            name="Spectroscopy",
        ),
        row=1,
        col=1,
    )
    fig.add_trace(
        go.Scatter(
            x=data_spec.get_values("frequency", "GHz"),
            y=data_spec.get_values("phase", "rad"),
            name="Spectroscopy",
        ),
        row=1,
        col=2,
    )

    fig.add_trace(
        go.Scatter(
            x=data_shifted.get_values("frequency", "GHz"),
            y=data_shifted.get_values("MSR", "uV"),
            name="Shifted Spectroscopy",
        ),
        row=1,
        col=1,
    )

    fig.add_trace(
        go.Scatter(
            x=data_shifted.get_values("frequency", "GHz"),
            y=data_shifted.get_values("phase", "rad"),
            name="Shifted Spectroscopy",
        ),
        row=1,
        col=2,
    )

    # fitting traces
    if len(data_spec) > 0 and len(data_fit) > 0:
        freqrange = np.linspace(
            min(data_spec.get_values("frequency", "GHz")),
            max(data_spec.get_values("frequency", "GHz")),
            2 * len(data_spec),
        )
        params = [i for i in list(data_fit.df.keys()) if "popt" not in i]
        fig.add_trace(
            go.Scatter(
                x=freqrange,
                y=lorenzian(
                    freqrange,
                    data_fit.get_values("popt0"),
                    data_fit.get_values("popt1"),
                    data_fit.get_values("popt2"),
                    data_fit.get_values("popt3"),
                ),
                name="Fit spectroscopy",
                line=go.scatter.Line(dash="dot"),
            ),
            row=1,
            col=1,
        )
        fig.add_annotation(
            dict(
                font=dict(color="black", size=12),
                x=0,
                y=-0.25,
                showarrow=False,
                text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.",
                textangle=0,
                xanchor="left",
                xref="paper",
                yref="paper",
            )
        )

    # fitting shifted traces
    if len(data_shifted) > 0 and len(data_fit_shifted) > 0:
        freqrange = np.linspace(
            min(data_shifted.get_values("frequency", "GHz")),
            max(data_shifted.get_values("frequency", "GHz")),
            2 * len(data_shifted),
        )
        params = [i for i in list(data_fit_shifted.df.keys()) if "popt" not in i]
        fig.add_trace(
            go.Scatter(
                x=freqrange,
                y=lorenzian(
                    freqrange,
                    data_fit_shifted.get_values("popt0"),
                    data_fit_shifted.get_values("popt1"),
                    data_fit_shifted.get_values("popt2"),
                    data_fit_shifted.get_values("popt3"),
                ),
                name="Fit shifted spectroscopy",
                line=go.scatter.Line(dash="dot"),
            ),
            row=1,
            col=1,
        )
        fig.add_annotation(
            dict(
                font=dict(color="black", size=12),
                x=0,
                y=-0.30,
                showarrow=False,
                text=f"The estimated shifted {params[0]} is {data_fit_shifted.df[params[0]][0]:.1f} Hz.",
                textangle=0,
                xanchor="left",
                xref="paper",
                yref="paper",
            )
        )

    fig.update_layout(
        showlegend=True,
        uirevision="0",  # ``uirevision`` allows zooming while live plotting
        xaxis_title="Frequency (GHz)",
        yaxis_title="MSR (uV)",
        xaxis2_title="Frequency (GHz)",
        yaxis2_title="Phase (rad)",
    )
    return fig
x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n 
xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n 
max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n 
x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = 
DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * 
len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n 
shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return 
fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", 
\"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n 
vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", 
\"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n 
showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n 
formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n 
go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n 
x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n 
horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n 
z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = 
go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", 
\"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n 
)\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 
1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n 
name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n 
x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live 
plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n 
x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(\n min(data_shifted.get_values(\"frequency\", \"GHz\")),\n max(data_shifted.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_shifted),\n )\n params = [i for i in list(data_fit_shifted.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit_shifted.get_values(\"popt0\"),\n data_fit_shifted.get_values(\"popt1\"),\n data_fit_shifted.get_values(\"popt2\"),\n data_fit_shifted.get_values(\"popt3\"),\n ),\n name=\"Fit shifted spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n 
row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))", "type": "common" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n 
x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils 
import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n 
x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, 
format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n 
data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") == plot1d_attenuation].copy()\n # split multiple software averages to different datasets\n datasets = []\n while len(smalldf):\n datasets.append(smalldf.drop_duplicates(\"frequency\"))\n smalldf.drop(datasets[-1].index, inplace=True)\n fig.add_trace(\n go.Scatter(\n x=datasets[-1][\"frequency\"].pint.to(\"GHz\").pint.magnitude,\n y=datasets[-1][\"MSR\"].pint.to(\"V\").pint.magnitude,\n ),\n )\n\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting,\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (V)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n 
subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Current (A)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Current (A)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"MSR\", \"V\"),\n colorbar_x=0.45,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"attenuation\", \"dB\"),\n z=data.get_values(\"phase\", \"rad\"),\n colorbar_x=1.0,\n ),\n row=1,\n col=2,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"Attenuation (dB)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Attenuation (dB)\",\n )\n return fig\n\n\ndef frequency_flux_msr_phase__matrix(folder, routine, qubit, format):\n fluxes = []\n for i in range(25): # FIXME: 25 is hardcoded\n file = f\"{folder}/data/{routine}/data_q{qubit}_f{i}.csv\"\n if os.path.exists(file):\n fluxes += [i]\n\n if len(fluxes) < 1:\n nb = 1\n else:\n nb = len(fluxes)\n fig = make_subplots(\n rows=2,\n cols=nb,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n x_title=\"Frequency (Hz)\",\n y_title=\"Current (A)\",\n shared_xaxes=True,\n shared_yaxes=True,\n )\n\n for j in fluxes:\n if j == fluxes[-1]:\n showscale = True\n else:\n showscale = False\n data = DataUnits.load_data(folder, 
routine, format, f\"data_q{qubit}_f{j}\")\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"MSR\", \"V\"),\n showscale=showscale,\n ),\n row=1,\n col=j,\n )\n fig.add_trace(\n go.Heatmap(\n x=data.get_values(\"frequency\", \"GHz\"),\n y=data.get_values(\"current\", \"A\"),\n z=data.get_values(\"phase\", \"rad\"),\n showscale=showscale,\n ),\n row=2,\n col=j,\n )\n fig.update_layout(\n showlegend=False,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n )\n return fig\n\n\ndef dispersive_frequency_msr_phase(folder, routine, qubit, formato):\n\n try:\n data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")\n except:\n data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})\n\n try:\n data_shifted = DataUnits.load_data(\n folder, routine, formato, f\"data_shifted_q{qubit}\"\n )\n except:\n data_shifted = DataUnits(\n name=f\"data_shifted_q{qubit}\", quantities={\"frequency\": \"Hz\"}\n )\n\n try:\n data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n try:\n data_fit_shifted = Data.load_data(\n folder, routine, formato, f\"fit_shifted_q{qubit}\"\n )\n except:\n data_fit_shifted = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"MSR\", \"uV\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_spec.get_values(\"frequency\", \"GHz\"),\n y=data_spec.get_values(\"phase\", \"rad\"),\n name=\"Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"MSR\", \"uV\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_shifted.get_values(\"frequency\", \"GHz\"),\n y=data_shifted.get_values(\"phase\", \"rad\"),\n name=\"Shifted Spectroscopy\",\n ),\n row=1,\n col=2,\n )\n\n # fitting traces\n if len(data_spec) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_spec.get_values(\"frequency\", \"GHz\")),\n max(data_spec.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_spec),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit spectroscopy\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n\n # fitting shifted traces\n if len(data_shifted) > 0 and len(data_fit_shifted) > 0:\n freqrange = np.linspace(\n min(data_shifted.get_values(\"frequency\", \"GHz\")),\n max(data_shifted.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_shifted),\n )\n params = 
[i for i in list(data_fit_shifted.df.keys()) if \"popt\" not in i]\n fig.add_trace(", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\nfrom qibocal.data import Data, DataUnits\nfrom qibocal.fitting.utils import lorenzian\n\n\ndef frequency_msr_phase__fast_precision(folder, routine, qubit, format):\n try:\n data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")\n except:\n data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_precision = DataUnits.load_data(\n folder, routine, format, f\"precision_sweep_q{qubit}\"\n )\n except:\n data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})\n try:\n data_fit = Data.load_data(folder, routine, format, f\"fit_q{qubit}\")\n except:\n data_fit = Data(\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"label1\",\n \"label2\",\n ]\n )\n\n fig = make_subplots(\n rows=1,\n cols=2,\n horizontal_spacing=0.1,\n vertical_spacing=0.1,\n subplot_titles=(\n \"MSR (V)\",\n \"phase (rad)\",\n ),\n )\n\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"MSR\", \"uV\"),\n name=\"Fast\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_fast.get_values(\"frequency\", \"GHz\"),\n y=data_fast.get_values(\"phase\", \"rad\"),\n name=\"Fast\",\n ),\n row=1,\n col=2,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"MSR\", \"uV\"),\n name=\"Precision\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=data_precision.get_values(\"frequency\", \"GHz\"),\n y=data_precision.get_values(\"phase\", \"rad\"),\n name=\"Precision\",\n ),\n row=1,\n col=2,\n )\n if len(data_fast) > 0 and len(data_fit) > 0:\n freqrange = np.linspace(\n min(data_fast.get_values(\"frequency\", \"GHz\")),\n max(data_fast.get_values(\"frequency\", \"GHz\")),\n 2 * len(data_fast),\n )\n params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]\n fig.add_trace(\n go.Scatter(\n x=freqrange,\n y=lorenzian(\n freqrange,\n data_fit.get_values(\"popt0\"),\n data_fit.get_values(\"popt1\"),\n data_fit.get_values(\"popt2\"),\n data_fit.get_values(\"popt3\"),\n ),\n name=\"Fit\",\n line=go.scatter.Line(dash=\"dot\"),\n ),\n row=1,\n col=1,\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.25,\n showarrow=False,\n text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.add_annotation(\n dict(\n font=dict(color=\"black\", size=12),\n x=0,\n y=-0.30,\n showarrow=False,\n text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",\n textangle=0,\n xanchor=\"left\",\n xref=\"paper\",\n yref=\"paper\",\n )\n )\n fig.update_layout(\n showlegend=True,\n uirevision=\"0\", # ``uirevision`` allows zooming while live plotting\n xaxis_title=\"Frequency (GHz)\",\n yaxis_title=\"MSR (uV)\",\n xaxis2_title=\"Frequency (GHz)\",\n yaxis2_title=\"Phase (rad)\",\n )\n return fig\n\n\ndef frequency_attenuation_msr_phase__cut(folder, routine, qubit, format):\n data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")\n plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot\n\n fig = go.Figure()\n # index data on a specific attenuation value\n smalldf = data.df[data.get_values(\"attenuation\", \"dB\") 
[ " data_precision = DataUnits(quantities={\"frequency\": \"Hz\"})", " data_fit = Data(", " y=data_spec.get_values(\"MSR\", \"uV\"),", " data_precision = DataUnits.load_data(", " min(data_shifted.get_values(\"frequency\", \"GHz\")),", " data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}\")", " data_shifted = DataUnits(", " x=data_fast.get_values(\"frequency\", \"GHz\"),", " y=data_fast.get_values(\"phase\", \"rad\"),", " y=data_shifted.get_values(\"MSR\", \"uV\"),", " max(data_shifted.get_values(\"frequency\", \"GHz\")),", " params = [i for i in list(data_fit_shifted.df.keys()) if \"popt\" not in i]", " data = DataUnits.load_data(folder, routine, format, f\"data_q{qubit}_f{j}\")", " min(data_fast.get_values(\"frequency\", \"GHz\")),", " data_fit.get_values(\"popt3\"),", " data_spec = DataUnits.load_data(folder, routine, formato, f\"data_q{qubit}\")", " x=data_shifted.get_values(\"frequency\", \"GHz\"),", " data_fast = DataUnits.load_data(folder, routine, format, f\"fast_sweep_q{qubit}\")", " data_fit.get_values(\"popt1\"),", " y=data_spec.get_values(\"phase\", \"rad\"),", " max(data_fast.get_values(\"frequency\", \"GHz\")),", " y=data_precision.get_values(\"MSR\", \"uV\"),", " y=data_fast.get_values(\"MSR\", \"uV\"),", " data_fit_shifted.get_values(\"popt3\"),", " data_fast = DataUnits(quantities={\"frequency\": \"Hz\"})", " y=data.get_values(\"current\", \"A\"),", " data_fit_shifted.get_values(\"popt1\"),", " y=data.get_values(\"attenuation\", \"dB\"),", " data_spec = DataUnits(name=f\"data_q{qubit}\", quantities={\"frequency\": \"Hz\"})", " params = [i for i in list(data_fit.df.keys()) if \"popt\" not in i]", " x=data.get_values(\"frequency\", \"GHz\"),", " data_fit_shifted = Data(", " y=data_shifted.get_values(\"phase\", \"rad\"),", " data_fit.get_values(\"popt2\"),", " data_fit_shifted.get_values(\"popt2\"),", " data_fit_shifted.get_values(\"popt0\"),", " y=lorenzian(", " data_fit_shifted = Data.load_data(", " text=f\"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.\",", " data_fit = Data.load_data(folder, routine, formato, f\"fit_q{qubit}\")", " text=f\"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.\",", " z=data.get_values(\"MSR\", \"V\"),", " folder, routine, format, f\"precision_sweep_q{qubit}\"", " z=data.get_values(\"phase\", \"rad\"),", " data_shifted = DataUnits.load_data(", " x=data_spec.get_values(\"frequency\", \"GHz\"),", " text=f\"The estimated shifted {params[0]} is {data_fit_shifted.df[params[0]][0]:.1f} Hz.\",", " smalldf.drop(datasets[-1].index, inplace=True)", " )", "", " go.Scatter(", " fig = make_subplots(", " xref=\"paper\",", " )", " row=1," ]
METASEP
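# ---------------------------------------------------------------------------
# NOTE (hedged illustration, not taken from the dataset records above or
# below): the qibocal acquisition routines in the qiboteam__qibocal sources
# that follow (resonator_spectroscopy, t1, ramsey, rabi_*) are written as
# generators that ``yield`` a partially filled dataset every ``points``
# iterations so a live consumer can refresh while acquisition is running.
# The snippet below only illustrates that consumption pattern;
# ``fake_routine`` and ``run_live`` are hypothetical stand-ins and a plain
# dict replaces qibocal's Dataset, so none of this is the real qibocal
# runner API.
from typing import Dict, Iterator, List


def fake_routine(n_steps: int, points: int = 10) -> Iterator[Dict[str, List[float]]]:
    """Mimic the yield-every-``points``-iterations structure used in the routines."""
    results: Dict[str, List[float]] = {"MSR[V]": [], "Time[ns]": []}
    for count in range(n_steps):
        if count % points == 0 and count > 0:
            yield results  # partial snapshot, would be re-plotted live
        results["MSR[V]"].append(0.0)        # placeholder measurement value
        results["Time[ns]"].append(float(count))
    yield results  # final, complete dataset


def run_live(routine: Iterator[Dict[str, List[float]]]) -> Dict[str, List[float]]:
    """Keep only the latest yielded snapshot, as a live consumer would."""
    latest: Dict[str, List[float]] = {}
    for snapshot in routine:
        latest = snapshot  # a real consumer would redraw its figure here
    return latest


if __name__ == "__main__":
    final = run_live(fake_routine(n_steps=50, points=10))
    print(len(final["Time[ns]"]))  # -> 50 acquired points
# ---------------------------------------------------------------------------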
16
qiboteam__qibocal
qiboteam__qibocal METASEP src/qibocal/calibrations/protocols/test.py METASEP # -*- coding: utf-8 -*- from qibo import gates, models from qibocal.data import Data def test( platform, qubit: list, nshots, points=1, ): data = Data("test", quantities=["nshots", "probabilities"]) nqubits = len(qubit) circuit = models.Circuit(nqubits) circuit.add(gates.H(qubit[0])) circuit.add(gates.H(qubit[1])) # circuit.add(gates.H(1)) circuit.add(gates.M(*qubit)) execution = circuit(nshots=nshots) data.add({"nshots": nshots, "probabilities": execution.probabilities()}) yield data src/qibocal/calibrations/protocols/__init__.py METASEP src/qibocal/calibrations/characterization/utils.py METASEP # -*- coding: utf-8 -*- import numpy as np def variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ): """Helper function for sweeps.""" return np.concatenate( ( np.arange(-lowres_width, -highres_width, lowres_step), np.arange(-highres_width, highres_width, highres_step), np.arange(highres_width, lowres_width, lowres_step), ) ) src/qibocal/calibrations/characterization/t1.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import t1_fit @plot("MSR vs Time", plots.t1_time_msr_phase) def t1( platform: AbstractPlatform, qubit: int, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step, software_averages, points=10, ): sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) ro_wait_range = np.arange( delay_before_readout_start, delay_before_readout_end, delay_before_readout_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) data = Dataset(name=f"data_q{qubit}", quantities={"Time": "ns"}) count = 0 for _ in range(software_averages): for wait in ro_wait_range: if count % points == 0 and count > 0: yield data yield t1_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["t1"], ) ro_pulse.start = qd_pulse.duration + wait msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": wait, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/resonator_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.calibrations.characterization.utils import variable_resolution_scanrange from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def resonator_spectroscopy( platform: AbstractPlatform, qubit: int, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = 
platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ) + resonator_frequency ) fast_sweep_data = Dataset( name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield fast_sweep_data yield lorentzian_fit( fast_sweep_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } fast_sweep_data.add(results) count += 1 yield fast_sweep_data # FIXME: have live ploting work for multiple datasets saved if platform.resonator_type == "3D": resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) else: resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) precision_sweep__data = Dataset( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(-precision_width, precision_width, precision_step) + resonator_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield precision_sweep__data yield lorentzian_fit( fast_sweep_data + precision_sweep__data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } precision_sweep__data.add(results) count += 1 yield precision_sweep__data @plot("Frequency vs Attenuation", plots.frequency_attenuation_msr_phase) @plot("MSR vs Frequency", plots.frequency_attenuation_msr_phase__cut) def resonator_punchout( platform: AbstractPlatform, qubit: int, freq_width, freq_step, min_att, max_att, step_att, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "attenuation": "dB"} ) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence = PulseSequence() sequence.add(ro_pulse) # TODO: move this explicit instruction to the platform resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency - (freq_width / 4) ) attenuation_range = np.flip(np.arange(min_att, max_att, step_att)) count = 0 for _ in range(software_averages): for att in attenuation_range: for freq in frequency_range: if count % points == 0: yield data # TODO: move these explicit instructions to the platform platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.ro_port[qubit].attenuation = att msr, phase, i, q = 
platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr * (np.exp(att / 10)), "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "attenuation[dB]": att, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Flux Current", plots.frequency_flux_msr_phase) def resonator_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline=0, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data # TODO: automatically extract the sweet spot current # TODO: add a method to generate the matrix @plot("MSR row 1 and Phase row 2", plots.frequency_flux_msr_phase__matrix) def resonator_spectroscopy_flux_matrix( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_min, current_max, current_step, fluxlines, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = np.arange(current_min, current_max, current_step) count = 0 for fluxline in fluxlines: fluxline = int(fluxline) print(fluxline) data = Dataset( name=f"data_q{qubit}_f{fluxline}", quantities={"frequency": "Hz", "current": "A"}, ) for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Frequency", plots.dispersive_frequency_msr_phase) def dispersive_shift( platform: AbstractPlatform, qubit: int, freq_width, freq_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( 
np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) data_spec = Dataset(name=f"data_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_spec yield lorentzian_fit( data_spec, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_spec.add(results) count += 1 yield data_spec # Shifted Spectroscopy sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.finish) sequence.add(RX_pulse) sequence.add(ro_pulse) data_shifted = Dataset( name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_shifted yield lorentzian_fit( data_spec, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], fit_file_name="fit_shifted", ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_shifted.add(results) count += 1 yield data_shifted src/qibocal/calibrations/characterization/ramsey.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import ramsey_fit @plot("MSR vs Time", plots.time_msr) def ramsey_frequency_detuned( platform: AbstractPlatform, qubit: int, t_start, t_end, t_step, n_osc, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) runcard_qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] runcard_T2 = platform.characterization["single_qubit"][qubit]["T2"] intermediate_freq = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "frequency" ] current_qubit_freq = runcard_qubit_freq current_T2 = runcard_T2 # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) t_end = np.array(t_end) for t_max in t_end: count = 0 platform.qd_port[qubit].lo_frequency = current_qubit_freq - intermediate_freq offset_freq = n_osc / t_max * sampling_rate # Hz t_range = np.arange(t_start, t_max, t_step) for wait in t_range: if count % points == 0 and count > 0: yield data yield ramsey_fit( 
data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait RX90_pulse2.relative_phase = ( (RX90_pulse2.start / sampling_rate) * (2 * np.pi) * (-offset_freq) ) ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "wait[ns]": wait, "t_max[ns]": t_max, } data.add(results) count += 1 # # Fitting data_fit = ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) new_t2 = data_fit.get_values("t2") corrected_qubit_freq = data_fit.get_values("corrected_qubit_frequency") # if ((new_t2 * 3.5) > t_max): if (new_t2 > current_T2).bool() and len(t_end) > 1: current_qubit_freq = int(corrected_qubit_freq) current_T2 = new_t2 data = Dataset( name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"} ) else: corrected_qubit_freq = int(current_qubit_freq) new_t2 = current_T2 break yield data @plot("MSR vs Time", plots.time_msr) def ramsey( platform: AbstractPlatform, qubit: int, delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, software_averages, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) waits = np.arange( delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) count = 0 for _ in range(software_averages): for wait in waits: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=qubit_freq, sampling_rate=sampling_rate, offset_freq=0, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "wait[ns]": wait, "t_max[ns]": np.array(delay_between_pulses_end), } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/rabi_oscillations.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import rabi_fit @plot("MSR vs Time", plots.time_msr_phase) def rabi_pulse_length( platform: AbstractPlatform, qubit: int, pulse_duration_start, 
pulse_duration_end, pulse_duration_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"Time": "ns"}) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_duration", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": duration, } data.add(results) count += 1 yield data @plot("MSR vs Gain", plots.gain_msr_phase) def rabi_pulse_gain( platform: AbstractPlatform, qubit: int, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"gain": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="gain[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_gain", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs Amplitude", plots.amplitude_msr_phase) def rabi_pulse_amplitude( platform, qubit: int, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"amplitude": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( 
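        # Readout LO placed at resonator_freq - ro_pulse.frequency so that
        # LO + readout IF lands on the resonator; a stop-gap until the
        # qpucard can be passed to qibolab (see FIXME above).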
platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="amplitude[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_amplitude", "rabi_oscillations_pi_pulse_max_voltage", "t1", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data @plot("MSR vs length and gain", plots.duration_gain_msr_phase) def rabi_pulse_length_and_gain( platform: AbstractPlatform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"duration": "ns", "gain": "dimensionless"} ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs length and amplitude", plots.duration_amplitude_msr_phase) def rabi_pulse_length_and_amplitude( platform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"duration": "ns", "amplitude": "dimensionless"}, ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( 
platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/qubit_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def qubit_spectroscopy( platform: AbstractPlatform, qubit: int, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] freqrange = np.arange(fast_start, fast_end, fast_step) + qubit_frequency data = Dataset(quantities={"frequency": "Hz", "attenuation": "dB"}) # FIXME: Waiting for Qblox platform to take care of that platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) data = Dataset(name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield data yield lorentzian_fit( data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data.add(results) count += 1 yield data if platform.resonator_type == "3D": qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) else: qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) prec_data = Dataset( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(precision_start, precision_end, precision_step) + qubit_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield prec_data yield lorentzian_fit( data + prec_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, 
"phase[rad]": phase, "frequency[Hz]": freq, } prec_data.add(results) count += 1 yield prec_data # TODO: Estimate avg_voltage correctly @plot("MSR and Phase vs Frequency", plots.frequency_flux_msr_phase) def qubit_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = np.arange(-freq_width, freq_width, freq_step) + qubit_frequency current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/flipping.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import flipping_fit @plot("MSR vs Flips", plots.flips_msr_phase) def flipping( platform: AbstractPlatform, qubit: int, niter, step, points=10, ): platform.reload_settings() pi_pulse_amplitude = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "amplitude" ] data = Dataset(name=f"data_q{qubit}", quantities={"flips": "dimensionless"}) sequence = PulseSequence() RX90_pulse = platform.create_RX90_pulse(qubit, start=0) count = 0 # repeat N iter times for n in range(0, niter, step): if count % points == 0 and count > 0: yield data yield flipping_fit( data, x="flips[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], niter=niter, pi_pulse_amplitude=pi_pulse_amplitude, labels=["amplitude_delta", "corrected_amplitude"], ) sequence.add(RX90_pulse) # execute sequence RX(pi/2) - [RX(pi) - RX(pi)] from 0...n times - RO start1 = RX90_pulse.duration for j in range(n): RX_pulse1 = platform.create_RX_pulse(qubit, start=start1) start2 = start1 + RX_pulse1.duration RX_pulse2 = platform.create_RX_pulse(qubit, start=start2) sequence.add(RX_pulse1) sequence.add(RX_pulse2) start1 = start2 + RX_pulse2.duration # add ro pulse at the end of the sequence ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start1) sequence.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ro_pulse.serial] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "flips[dimensionless]": np.array(n), } data.add(results) count += 1 sequence = PulseSequence() yield data src/qibocal/calibrations/characterization/calibrate_qubit_states.py METASEP # -*- coding: 
utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot @plot("exc vs gnd", plots.exc_gnd) def calibrate_qubit_states_binning( platform: AbstractPlatform, qubit: int, niter, points=10, ): platform.reload_settings() platform.qrm[qubit].ports[ "i1" ].hardware_demod_en = True # binning only works with hardware demodulation enabled # create exc sequence exc_sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.duration) exc_sequence.add(RX_pulse) exc_sequence.add(ro_pulse) data_exc = Dataset( name=f"data_exc_q{qubit}", quantities={"iteration": "dimensionless"} ) shots_results = platform.execute_pulse_sequence(exc_sequence, nshots=niter)[ "shots" ][ro_pulse.serial] for n in np.arange(niter): msr, phase, i, q = shots_results[n] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "iteration[dimensionless]": n, } data_exc.add(results) yield data_exc gnd_sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) gnd_sequence.add(ro_pulse) data_gnd = Dataset( name=f"data_gnd_q{qubit}", quantities={"iteration": "dimensionless"} ) shots_results = platform.execute_pulse_sequence(gnd_sequence, nshots=niter)[ "shots" ][ro_pulse.serial] for n in np.arange(niter): msr, phase, i, q = shots_results[n] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "iteration[dimensionless]": n, } data_gnd.add(results) yield data_gnd src/qibocal/calibrations/characterization/allXY.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import Dataset from qibocal.decorators import plot from qibocal.fitting.methods import drag_tunning_fit # allXY rotations gatelist = [ ["I", "I"], ["RX(pi)", "RX(pi)"], ["RY(pi)", "RY(pi)"], ["RX(pi)", "RY(pi)"], ["RY(pi)", "RX(pi)"], ["RX(pi/2)", "I"], ["RY(pi/2)", "I"], ["RX(pi/2)", "RY(pi/2)"], ["RY(pi/2)", "RX(pi/2)"], ["RX(pi/2)", "RY(pi)"], ["RY(pi/2)", "RX(pi)"], ["RX(pi)", "RY(pi/2)"], ["RY(pi)", "RX(pi/2)"], ["RX(pi/2)", "RX(pi)"], ["RX(pi)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi)"], ["RY(pi)", "RY(pi/2)"], ["RX(pi)", "I"], ["RY(pi)", "I"], ["RX(pi/2)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi/2)"], ] @plot("Prob vs gate sequence", plots.prob_gate) def allXY( platform: AbstractPlatform, qubit: int, beta_param=None, software_averages=1, points=10, ): platform.reload_settings() state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = Dataset( name=f"data_q{qubit}", quantities={"probability": "dimensionless", "gateNumber": "dimensionless"}, ) count = 0 for _ in range(software_averages): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[ ro_pulse.serial ] if platform.resonator_type == "3D": prob = np.abs(msr * 1e6 - state1_voltage) / ( state0_voltage - state1_voltage ) prob = (2 * prob) - 1 else: prob = np.abs(msr * 1e6 - state1_voltage) / ( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 
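            # prob maps the averaged readout voltage onto roughly [-1, 1]:
            # msr*1e6 at state1_voltage -> -1, at state0_voltage -> +1, and
            # halfway between the two -> 0 (assuming real voltage levels).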
results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": np.array(gateNumber), } data.add(results) count += 1 gateNumber += 1 yield data @plot("Prob vs gate sequence", plots.prob_gate_iteration) def allXY_iteration( platform: AbstractPlatform, qubit: int, beta_start, beta_end, beta_step, software_averages=1, points=10, ): platform.reload_settings() state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = Dataset( name=f"data_q{qubit}", quantities={ "probability": "dimensionless", "gateNumber": "dimensionless", "beta_param": "dimensionless", }, ) count = 0 for _ in range(software_averages): for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[ ro_pulse.serial ] if platform.resonator_type == "3D": prob = np.abs(msr * 1e6 - state1_voltage) / ( state0_voltage - state1_voltage ) prob = (2 * prob) - 1 else: prob = np.abs(msr * 1e6 - state1_voltage) / ( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": np.array(gateNumber), "beta_param[dimensionless]": np.array(beta_param), } data.add(results) count += 1 gateNumber += 1 yield data @plot("MSR vs beta parameter", plots.msr_beta) def drag_pulse_tunning( platform: AbstractPlatform, qubit: int, beta_start, beta_end, beta_step, points=10, ): platform.reload_settings() data = Dataset(name=f"data_q{qubit}", quantities={"beta_param": "dimensionless"}) count = 0 for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): if count % points == 0 and count > 0: yield data yield drag_tunning_fit( data, x="beta_param[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "optimal_beta_param", ], ) # drag pulse RX(pi/2) RX90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=0, beta=beta_param ) # drag pulse RY(pi) RY_drag_pulse = platform.create_RX_drag_pulse( qubit, start=RX90_drag_pulse.finish, relative_phase=+np.pi / 2, beta=beta_param, ) # RO pulse ro_pulse = platform.create_qubit_readout_pulse( qubit, start=RY_drag_pulse.finish ) # Rx(pi/2) - Ry(pi) - Ro seq1 = PulseSequence() seq1.add(RX90_drag_pulse) seq1.add(RY_drag_pulse) seq1.add(ro_pulse) msr1, i1, q1, phase1 = platform.execute_pulse_sequence(seq1, nshots=1024)[ ro_pulse.serial ] # drag pulse RY(pi) RY_drag_pulse = platform.create_RX_drag_pulse( qubit, start=0, relative_phase=np.pi / 2, beta=beta_param ) # drag pulse RX(pi/2) RX90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=RY_drag_pulse.finish, beta=beta_param ) # Ry(pi) - Rx(pi/2) - Ro seq2 = PulseSequence() seq2.add(RY_drag_pulse) seq2.add(RX90_drag_pulse) seq2.add(ro_pulse) msr2, phase2, i2, q2 = platform.execute_pulse_sequence(seq2, nshots=1024)[ ro_pulse.serial ] results = { "MSR[V]": msr1 - msr2, "i[V]": i1 - i2, "q[V]": q1 - q2, "phase[deg]": phase1 - phase2, "beta_param[dimensionless]": beta_param, } data.add(results) count += 1 yield data def _get_sequence_from_gate_pair(platform, gates, qubit, beta_param): sampling_rate = platform.sampling_rate 
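    # Translate the pair of gate labels ("I", "RX(pi)", "RX(pi/2)", "RY(pi)",
    # "RY(pi/2)") into native pulses, using the DRAG variants when beta_param
    # is given. All gates share the native RX duration, so the readout pulse
    # is scheduled at sequenceDuration + 4 ns.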
pulse_frequency = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "frequency" ] pulse_duration = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "duration" ] # All gates have equal pulse duration sequence = PulseSequence() sequenceDuration = 0 pulse_start = 0 for gate in gates: if gate == "I": # print("Transforming to sequence I gate") pass if gate == "RX(pi)": # print("Transforming to sequence RX(pi) gate") if beta_param == None: RX_pulse = platform.create_RX_pulse( qubit, start=pulse_start, ) else: RX_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX_pulse) if gate == "RX(pi/2)": # print("Transforming to sequence RX(pi/2) gate") if beta_param == None: RX90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, ) else: RX90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX90_pulse) if gate == "RY(pi)": # print("Transforming to sequence RY(pi) gate") if beta_param == None: RY_pulse = platform.create_RX_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY_pulse) if gate == "RY(pi/2)": # print("Transforming to sequence RY(pi/2) gate") if beta_param == None: RY90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY90_pulse) sequenceDuration = sequenceDuration + pulse_duration pulse_start = pulse_duration # RO pulse starting just after pair of gates ro_pulse = platform.create_qubit_readout_pulse(qubit, start=sequenceDuration + 4) return sequence, ro_pulse src/qibocal/calibrations/characterization/__init__.py METASEP doc/source/conf.py METASEP # -*- coding: utf-8 -*- # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.transform import AutoStructify sys.path.insert(0, os.path.abspath("..")) import qcvv # -- Project information ----------------------------------------------------- project = "qcvv" copyright = "2022, The Qibo team" author = "The Qibo team" # The full version, including alpha/beta/rc tags release = qcvv.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Markdown configuration # The suffix(es) of source filenames. 
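# (Markdown sources are handled by recommonmark and sphinx_markdown_tables,
# both listed in `extensions` above.)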
# You can specify multiple suffix as a list of string: # source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"} autosectionlabel_prefix_document = True # Allow to embed rst syntax in markdown files. enable_eval_rst = True # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # -- Intersphinx ------------------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Autodoc ------------------------------------------------------------------ # autodoc_member_order = "bysource" # Adapted this from # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py # app setup hook def setup(app): app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True) app.add_transform(AutoStructify) src/qibocal/web/server.py METASEP # -*- coding: utf-8 -*- import os import pathlib import yaml from flask import Flask, render_template from qcvv.cli.builders import ReportBuilder from qcvv import __version__ server = Flask(__name__) @server.route("/") @server.route("/data/<path>") def page(path=None): folders = [ folder for folder in reversed(sorted(os.listdir(os.getcwd()))) if os.path.isdir(folder) and "meta.yml" in os.listdir(folder) ] report = None if path is not None: try: report = ReportBuilder(path) except (FileNotFoundError, TypeError): pass return render_template( "template.html", version=__version__, folders=folders, report=report, ) src/qibocal/web/report.py METASEP # -*- coding: utf-8 -*- import os import pathlib from jinja2 import Environment, FileSystemLoader from qcvv.cli.builders import ReportBuilder from qcvv import __version__ def create_report(path): """Creates an HTML report for the data in the given path.""" filepath = pathlib.Path(__file__) with open(os.path.join(filepath.with_name("static"), "styles.css"), "r") as file: css_styles = f"<style>\n{file.read()}\n</style>" report = ReportBuilder(path) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") html = template.render( is_static=True, css_styles=css_styles, version=__version__, report=report, ) with open(os.path.join(path, "index.html"), "w") as file: file.write(html) src/qibocal/web/app.py METASEP # -*- coding: utf-8 -*- import os import pandas as pd import yaml from dash import Dash, Input, Output, dcc, html from qcvv.data import Dataset from qcvv.web.server import server from qcvv import plots Dataset() # dummy dataset call to suppress ``pint[V]`` error app = Dash( server=server, suppress_callback_exceptions=True, ) app.layout = html.Div( [ dcc.Location(id="url", refresh=False), dcc.Graph(id="graph", figure={}), dcc.Interval( id="interval", # TODO: Perhaps the user should be allowed to change the refresh rate interval=1000, n_intervals=0, disabled=False, ), ] ) @app.callback( Output("graph", "figure"), 
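    # Dash callback: refresh the live figure on every Interval tick (1 s here).
    # The URL path carries the method, folder, routine, qubit and format
    # segments that get_graph unpacks below to dispatch the corresponding
    # plotting function; if the data file is missing or empty the previous
    # figure is kept.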
Input("interval", "n_intervals"), Input("graph", "figure"), Input("url", "pathname"), ) def get_graph(n, current_figure, url): method, folder, routine, qubit, format = url.split(os.sep)[2:] try: # data = Dataset.load_data(folder, routine, format, "precision_sweep") # with open(f"{folder}/platform.yml", "r") as f: # nqubits = yaml.safe_load(f)["nqubits"] # if len(data) > 2: # params, fit = resonator_spectroscopy_fit(folder, format, nqubits) # else: # params, fit = None, None # return getattr(plots.resonator_spectroscopy, method)(data, params, fit) # # FIXME: Temporarily hardcode the plotting method to test # # multiple routines with different names in one folder # # should be changed to: # # return getattr(getattr(plots, routine), method)(data) return getattr(plots, method)(folder, routine, qubit, format) except (FileNotFoundError, pd.errors.EmptyDataError): return current_figure src/qibocal/web/__init__.py METASEP src/qibocal/tests/test_data.py METASEP # -*- coding: utf-8 -*- """Some tests for the Dataset class""" import tempfile import numpy as np import pytest from pint import DimensionalityError, UndefinedUnitError from qibocal.data import Dataset def random_dataset(length): data = Dataset() for _ in range(length): msr, i, q, phase = np.random.rand(len(data.df.columns)) data.add({"MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) return data def test_data_initialization(): """Test Dataset constructor""" data = Dataset() assert len(data.df.columns) == 4 assert list(data.df.columns) == ["MSR", "i", "q", "phase"] data1 = Dataset(quantities={"attenuation": "dB"}) assert len(data1.df.columns) == 5 assert list(data1.df.columns) == ["attenuation", "MSR", "i", "q", "phase"] def test_units(): """Test units of measure in Dataset""" data = Dataset() assert data.df.MSR.values.units == "volt" data1 = Dataset(quantities={"frequency": "Hz"}) assert data1.df.frequency.values.units == "hertz" with pytest.raises(UndefinedUnitError): data2 = Dataset(quantities={"fake_unit": "fake"}) def test_add(): """Test add method of Dataset""" data = random_dataset(5) assert len(data) == 5 data1 = Dataset(quantities={"attenuation": "dB"}) msr, i, q, phase, att = np.random.rand(len(data1.df.columns)) data1.add( { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "attenuation[dB]": att, } ) assert len(data1) == 1 data1.add( { "MSR[V]": 0, "i[V]": 0.0, "q[V]": 0.0, "phase[deg]": 0, "attenuation[dB]": 1, } ) assert len(data1) == 2 data2 = Dataset() msr, i, q, phase = np.random.rand(len(data2.df.columns)) with pytest.raises(DimensionalityError): data2.add({"MSR[dB]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) with pytest.raises(UndefinedUnitError): data2.add({"MSR[test]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) src/qibocal/plots/scatters.py METASEP # -*- coding: utf-8 -*- import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, Dataset from qibocal.fitting.utils import cos, exp, flipping, lorenzian, rabi, ramsey def frequency_msr_phase__fast_precision(folder, routine, qubit, format): try: data_fast = Dataset.load_data(folder, routine, format, f"fast_sweep_q{qubit}") except: data_fast = Dataset(quantities={"frequency": "Hz"}) try: data_precision = Dataset.load_data( folder, routine, format, f"precision_sweep_q{qubit}" ) except: data_precision = Dataset(quantities={"frequency": "Hz"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "fit_amplitude", "fit_center", 
"fit_sigma", "fit_offset", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("MSR", "uV"), name="Fast", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("phase", "rad"), name="Fast", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("MSR", "uV"), name="Precision", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("phase", "rad"), name="Precision", ), row=1, col=2, ) if len(data_fast) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_fast.get_values("frequency", "GHz")), max(data_fast.get_values("frequency", "GHz")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.df["fit_amplitude"][0], data_fit.df["fit_center"][0], data_fit.df["fit_sigma"][0], data_fit.df["fit_offset"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig def frequency_attenuation_msr_phase__cut(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot fig = go.Figure() # index data on a specific attenuation value smalldf = data.df[data.get_values("attenuation", "dB") == plot1d_attenuation].copy() # split multiple software averages to different datasets datasets = [] while len(smalldf): datasets.append(smalldf.drop_duplicates("frequency")) smalldf.drop(datasets[-1].index, inplace=True) fig.add_trace( go.Scatter( x=datasets[-1]["frequency"].pint.to("GHz").pint.magnitude, y=datasets[-1]["MSR"].pint.to("V").pint.magnitude, ), ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting, xaxis_title="Frequency (GHz)", yaxis_title="MSR (V)", ) return fig # For Rabi oscillations def time_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting 
trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) # add annotation for label[0] -> pi_pulse_duration fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> rabi_oscillations_pi_pulse_max_voltage fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> rabi_oscillations_pi_pulse_max_voltage fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig def gain_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"gain", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("gain", "dimensionless")), max(data.get_values("gain", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> pi_pulse_gain fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gain (dimensionless)", yaxis_title="MSR (uV)", ) return fig def amplitude_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, 
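        # The data file may not exist yet while live plotting; the except
        # branches below fall back to empty Dataset/Data objects so the
        # figure can still be rendered.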
format, f"data_q{qubit}") except: data = Dataset(quantities={"amplitude", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("amplitude", "dimensionless")), max(data.get_values("amplitude", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # add annotation for label[0] -> pi_pulse_gain fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.1f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Amplitude (dimensionless)", yaxis_title="MSR (uV)", ) return fig # For Ramsey oscillations def time_msr(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=("MSR (V)",), ) fig.add_trace( go.Scatter( x=data.get_values("wait", "ns"), y=data.get_values("MSR", "uV"), name="Ramsey", ), row=1, col=1, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("wait", "ns")), max(data.get_values("wait", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=ramsey( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], data_fit.df["popt4"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.3f} Hz", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) 
fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", ) return fig # T1 def t1_time_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="T1", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="T1", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=exp( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig # Flipping def flips_msr_phase(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset(quantities={"flips": "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("MSR", "uV"), name="Flipping MSR", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("phase", "rad"), name="Flipping Phase", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("flips", "dimensionless")), max(data.get_values("flips", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=timerange, y=flipping( timerange, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Flips (dimensionless)", yaxis_title="MSR (uV)", 
xaxis2_title="Flips (dimensionless)", yaxis2_title="Phase (rad)", ) return fig # For calibrate qubit states def exc_gnd(folder, routine, qubit, format): try: data_exc = Dataset.load_data(folder, routine, format, f"data_exc_q{qubit}") except: data_exc = Dataset(quantities={"iteration": "dimensionless"}) fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=("Calibrate qubit states",), ) fig.add_trace( go.Scatter( x=data_exc.get_values("i", "V"), y=data_exc.get_values("q", "V"), name="exc_state", mode="markers", marker=dict(size=3, color="lightcoral"), ), row=1, col=1, ) try: data_gnd = Dataset.load_data(folder, routine, format, f"data_gnd_q{qubit}") except: data_gnd = Dataset(quantities={"iteration": "dimensionless"}) fig.add_trace( go.Scatter( x=data_gnd.get_values("i", "V"), y=data_gnd.get_values("q", "V"), name="gnd state", mode="markers", marker=dict(size=3, color="skyblue"), ), row=1, col=1, ) i_exc = data_exc.get_values("i", "V") q_exc = data_exc.get_values("q", "V") i_mean_exc = i_exc.mean() q_mean_exc = q_exc.mean() iq_mean_exc = complex(i_mean_exc, q_mean_exc) mod_iq_exc = abs(iq_mean_exc) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_exc], y=[q_mean_exc], name=f" state1_voltage: {mod_iq_exc} <br> mean_exc_state: {iq_mean_exc}", mode="markers", marker=dict(size=10, color="red"), ), row=1, col=1, ) i_gnd = data_gnd.get_values("i", "V") q_gnd = data_gnd.get_values("q", "V") i_mean_gnd = i_gnd.mean() q_mean_gnd = q_gnd.mean() iq_mean_gnd = complex(i_mean_gnd, q_mean_gnd) mod_iq_gnd = abs(iq_mean_gnd) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_gnd], y=[q_mean_gnd], name=f" state0_voltage: {mod_iq_gnd} <br> mean_gnd_state: {iq_mean_gnd}", mode="markers", marker=dict(size=10, color="blue"), ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="i (V)", yaxis_title="q (V)", width=1000, ) return fig # allXY def prob_gate(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset( quantities={"probability": "dimensionless", "gateNumber": "dimensionless"} ) fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=(f"allXY",), ) fig.add_trace( go.Scatter( x=data.get_values("gateNumber", "dimensionless"), y=data.get_values("probability", "dimensionless"), mode="markers", name="Probabilities", ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gate sequence number", yaxis_title="Z projection probability of qubit state |o>", ) return fig # allXY def prob_gate_iteration(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset( quantities={ "probability": "dimensionless", "gateNumber": "dimensionless", "beta_param": "dimensionless", } ) data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=(f"allXY",), ) gates = len(data.get_values("gateNumber", "dimensionless")) # print(gates) import numpy as np for n in range(gates // 21): data_start = n * 21 data_end = data_start + 21 beta_param = np.array(data.get_values("beta_param", "dimensionless"))[ data_start ] gates = np.array(data.get_values("gateNumber", "dimensionless"))[ data_start:data_end ] probabilities = np.array(data.get_values("probability", 
"dimensionless"))[ data_start:data_end ] c = "#" + "{:06x}".format(n * 823000) fig.add_trace( go.Scatter( x=gates, y=probabilities, mode="markers+lines", line=dict(color=c), name=f"beta_parameter = {beta_param}", marker_size=16, ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gate sequence number", yaxis_title="Z projection probability of qubit state |o>", ) return fig # beta param tuning def msr_beta(folder, routine, qubit, format): try: data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") except: data = Dataset( name=f"data_q{qubit}", quantities={"beta_param": "dimensionless"} ) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Dataset() fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.01, vertical_spacing=0.01, subplot_titles=(f"beta_param_tuning",), ) c = "#6597aa" fig.add_trace( go.Scatter( x=data.get_values("beta_param", "dimensionless"), y=data.get_values("MSR", "uV"), line=dict(color=c), mode="markers", name="[Rx(pi/2) - Ry(pi)] - [Ry(pi) - Rx(pi/2)]", ), row=1, col=1, ) # add fitting traces if len(data) > 0 and len(data_fit) > 0: beta_param = np.linspace( min(data.get_values("beta_param", "dimensionless")), max(data.get_values("beta_param", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=beta_param, y=cos( beta_param, data_fit.df["popt0"][0], data_fit.df["popt1"][0], data_fit.df["popt2"][0], data_fit.df["popt3"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Beta parameter", yaxis_title="MSR[uV]", ) return fig def dispersive_frequency_msr_phase(folder, routine, qubit, formato): try: data_spec = Dataset.load_data(folder, routine, formato, f"data_q{qubit}") except: data_spec = Dataset(name=f"data_q{qubit}", quantities={"frequency": "Hz"}) try: data_shifted = Dataset.load_data( folder, routine, formato, f"data_shifted_q{qubit}" ) except: data_shifted = Dataset( name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"} ) try: data_fit = Data.load_data(folder, routine, formato, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", "label1", "label2", ] ) try: data_fit_shifted = Data.load_data( folder, routine, formato, f"fit_shifted_q{qubit}" ) except: data_fit_shifted = Data( quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_spec.get_values("frequency", "GHz"), y=data_spec.get_values("MSR", "uV"), name="Spectroscopy", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_spec.get_values("frequency", "GHz"), y=data_spec.get_values("phase", "rad"), name="Spectroscopy", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_shifted.get_values("frequency", "GHz"), y=data_shifted.get_values("MSR", "uV"), name="Shifted Spectroscopy", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_shifted.get_values("frequency", "GHz"), 
y=data_shifted.get_values("phase", "rad"), name="Shifted Spectroscopy", ), row=1, col=2, ) # fitting traces if len(data_spec) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_spec.get_values("frequency", "GHz")), max(data_spec.get_values("frequency", "GHz")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.df["fit_amplitude"][0], data_fit.df["fit_center"][0], data_fit.df["fit_sigma"][0], data_fit.df["fit_offset"][0], ), name="Fit spectroscopy", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # fitting shifted traces if len(data_shifted) > 0 and len(data_fit_shifted) > 0: freqrange = np.linspace( min(data_shifted.get_values("frequency", "GHz")), max(data_shifted.get_values("frequency", "GHz")), 20, ) params = [i for i in list(data_fit_shifted.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit_shifted.df["fit_amplitude"][0], data_fit_shifted.df["fit_center"][0], data_fit_shifted.df["fit_sigma"][0], data_fit_shifted.df["fit_offset"][0], ), name="Fit shifted spectroscopy", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated shifted {params[0]} is {data_fit_shifted.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig src/qibocal/plots/heatmaps.py METASEP # -*- coding: utf-8 -*- import os.path import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Dataset def frequency_flux_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Current (A)", xaxis2_title="Frequency (GHz)", yaxis2_title="Current (A)", ) return fig def frequency_attenuation_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) 
fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Attenuation (dB)", xaxis2_title="Frequency (GHz)", yaxis2_title="Attenuation (dB)", ) return fig def frequency_flux_msr_phase__matrix(folder, routine, qubit, format): fluxes = [] for i in range(25): # FIXME: 25 is hardcoded file = f"{folder}/data/{routine}/data_q{qubit}_f{i}.csv" if os.path.exists(file): fluxes += [i] if len(fluxes) < 1: nb = 1 else: nb = len(fluxes) fig = make_subplots( rows=2, cols=nb, horizontal_spacing=0.1, vertical_spacing=0.1, x_title="Frequency (Hz)", y_title="Current (A)", shared_xaxes=True, shared_yaxes=True, ) for j in fluxes: if j == fluxes[-1]: showscale = True else: showscale = False data = Dataset.load_data(folder, routine, format, f"data_q{qubit}_f{j}") fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), showscale=showscale, ), row=1, col=j, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), showscale=showscale, ), row=2, col=j, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting ) return fig def duration_gain_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="duration (ns)", yaxis_title="gain (dimensionless)", xaxis2_title="duration (ns)", yaxis2_title="gain (dimensionless)", ) return fig def duration_amplitude_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="duration (ns)", yaxis_title="amplitude (dimensionless)", xaxis2_title="duration (ns)", yaxis2_title="amplitude (dimensionless)", ) return fig src/qibocal/plots/__init__.py METASEP # -*- coding: utf-8 -*- from qibocal.plots.heatmaps import * from qibocal.plots.scatters import * src/qibocal/fitting/utils.py METASEP # -*- coding: utf-8 -*- import re import numpy as np def lorenzian(frequency, amplitude, center, sigma, offset): # http://openafox.com/science/peak-function-derivations.html return (amplitude / np.pi) * ( sigma / ((frequency - center) ** 2 + sigma**2) ) + offset def rabi(x, p0, p1, p2, p3, p4): # A fit to 
Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def exp(x, *p): return p[0] - p[1] * np.exp(-1 * x * p[2]) def flipping(x, p0, p1, p2, p3): # A fit to Flipping Qubit oscillation # Epsilon?? shoule be Amplitude : p[0] # Offset : p[1] # Period of oscillation : p[2] # phase for the first point corresponding to pi/2 rotation : p[3] return np.sin(x * 2 * np.pi / p2 + p3) * p0 + p1 def cos(x, p0, p1, p2, p3): # Offset : p[0] # Amplitude : p[1] # Period : p[2] # Phase : p[3] return p0 + p1 * np.cos(2 * np.pi * x / p2 + p3) def parse(key): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) return name, unit src/qibocal/fitting/methods.py METASEP # -*- coding: utf-8 -*- """Routine-specific method for post-processing data acquired.""" import lmfit import numpy as np from scipy.optimize import curve_fit from qibocal.config import log from qibocal.data import Data from qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey def lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None): """Fitting routine for resonator spectroscopy""" if fit_file_name == None: data_fit = Data( name=f"fit_q{qubit}", quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", labels[1], labels[0], ], ) else: data_fit = Data( name=fit_file_name + f"_q{qubit}", quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", labels[1], labels[0], ], ) frequencies = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) # Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(lorenzian) # Guess parameters for Lorentzian max or min if (nqubits == 1 and labels[0] == "resonator_freq") or ( nqubits != 1 and labels[0] == "qubit_freq" ): guess_center = frequencies[ np.argmax(voltages) ] # Argmax = Returns the indices of the maximum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center) guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi else: guess_center = frequencies[ np.argmin(voltages) ] # Argmin = Returns the indices of the minimum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center) guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi # Add guessed parameters to the model model_Q.set_param_hint("center", value=guess_center, vary=True) model_Q.set_param_hint("sigma", value=guess_sigma, vary=True) model_Q.set_param_hint("amplitude", value=guess_amp, vary=True) model_Q.set_param_hint("offset", value=guess_offset, vary=True) guess_parameters = model_Q.make_params() # fit the model with the data and guessed parameters try: fit_res = model_Q.fit( data=voltages, frequency=frequencies, params=guess_parameters ) except: log.warning("The fitting was not successful") return data_fit # get the values for postprocessing and for legend. 
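    # f0 is the fitted Lorentzian center, BW = 2 * sigma its full width,
    # Q = |f0 / BW| the quality factor, and peak_voltage the value of the
    # Lorentzian evaluated at its center: amplitude / (sigma * pi) + offset.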
f0 = fit_res.best_values["center"] BW = fit_res.best_values["sigma"] * 2 Q = abs(f0 / BW) peak_voltage = ( fit_res.best_values["amplitude"] / (fit_res.best_values["sigma"] * np.pi) + fit_res.best_values["offset"] ) freq = f0 * 1e6 data_fit.add( { labels[1]: peak_voltage, labels[0]: freq, "fit_amplitude": fit_res.best_values["amplitude"], "fit_center": fit_res.best_values["center"], "fit_sigma": fit_res.best_values["sigma"], "fit_offset": fit_res.best_values["offset"], } ) return data_fit def rabi_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 0.1e-6, ] else: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmax(voltages.values)], np.pi / 2, 0.1e-6, ] try: popt, pcov = curve_fit( rabi, time.values, voltages.values, p0=pguess, maxfev=10000 ) smooth_dataset = rabi(time.values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6 t1 = 1.0 / popt[4] # double check T1 except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: pi_pulse_duration, labels[1]: rabi_oscillations_pi_pulse_max_voltage, labels[2]: t1, } ) return data_fit def ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 500e-9, ] try: popt, pcov = curve_fit( ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000 ) delta_fitting = popt[2] delta_phys = int((delta_fitting * sampling_rate) - offset_freq) corrected_qubit_frequency = int(qubit_freq - delta_phys) t2 = 1.0 / popt[4] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: delta_phys, labels[1]: corrected_qubit_frequency, labels[2]: t2, } ) return data_fit def t1_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", labels[0], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ max(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] else: pguess = [ min(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] try: popt, pcov = curve_fit( exp, time.values, voltages.values, p0=pguess, maxfev=2000000 ) t1 = abs(1 / popt[2]) except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], labels[0]: t1, } ) return data_fit def flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], labels[1], ], ) flips = 
data.get_values(*parse(x)) # Check X data stores. N flips or i? voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter else: pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter try: popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000) epsilon = -np.pi / popt[2] amplitude_delta = np.pi / (np.pi + epsilon) corrected_amplitude = amplitude_delta * pi_pulse_amplitude # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter) # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: amplitude_delta, labels[1]: corrected_amplitude, } ) return data_fit def drag_tunning_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], ], ) beta_params = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ 0, # Offset: p[0] beta_params.values[np.argmax(voltages)] - beta_params.values[np.argmin(voltages)], # Amplitude: p[1] 4, # Period: p[2] 0.3, # Phase: p[3] ] try: popt, pcov = curve_fit(cos, beta_params.values, voltages.values) smooth_dataset = cos(beta_params.values, popt[0], popt[1], popt[2], popt[3]) beta_optimal = beta_params.values[np.argmin(smooth_dataset)] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: beta_optimal, } ) return data_fit src/qibocal/fitting/__init__.py METASEP src/qibocal/cli/builders.py METASEP # -*- coding: utf-8 -*- import datetime import inspect import os import shutil import yaml from qibocal import calibrations from qibocal.config import log, raise_error from qibocal.data import Data def load_yaml(path): """Load yaml file from disk.""" with open(path, "r") as file: data = yaml.safe_load(file) return data class ActionBuilder: """Class for parsing and executing runcards. Args: runcard (path): path containing the runcard. folder (path): path for the output folder. force (bool): option to overwrite the output folder if it exists already. """ def __init__(self, runcard, folder=None, force=False): path, self.folder = self._generate_output_folder(folder, force) self.runcard = load_yaml(runcard) # Qibolab default backend if not provided in runcard. backend_name = self.runcard.get("backend", "qibolab") platform_name = self.runcard.get("platform", "dummy") self.backend, self.platform = self._allocate_backend( backend_name, platform_name ) self.qubits = self.runcard["qubits"] self.format = self.runcard["format"] # Saving runcard self.save_runcards(path, runcard, platform_name) self.save_meta(path, self.folder) @staticmethod def _generate_output_folder(folder, force): """Static method for generating the output folder. Args: folder (path): path for the output folder. If None it will be created a folder automatically force (bool): option to overwrite the output folder if it exists already. 
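        Returns:
            tuple: the absolute ``path`` of the created directory and the
            ``folder`` name used to build it.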
""" if folder is None: import getpass e = datetime.datetime.now() user = getpass.getuser().replace(".", "-") date = e.strftime("%Y-%m-%d") folder = f"{date}-{'000'}-{user}" num = 0 while os.path.exists(folder): log.warning(f"Directory {folder} already exists.") num += 1 folder = f"{date}-{str(num).rjust(3, '0')}-{user}" log.warning(f"Trying to create directory {folder}") elif os.path.exists(folder) and not force: raise_error(RuntimeError, f"Directory {folder} already exists.") elif os.path.exists(folder) and force: log.warning(f"Deleting previous directory {folder}.") shutil.rmtree(os.path.join(os.getcwd(), folder)) path = os.path.join(os.getcwd(), folder) log.info(f"Creating directory {folder}.") os.makedirs(path) return path, folder def _allocate_backend(self, backend_name, platform_name): """Allocate the platform using Qibolab.""" from qibo.backends import GlobalBackend, set_backend from qibolab.platform import Platform from qibolab.platforms.abstract import AbstractPlatform set_backend(backend=backend_name, platform=platform_name) backend = GlobalBackend() if backend_name == "qibolab": platform = backend.platform else: platform = None return backend, platform def save_runcards(self, path, runcard, platform_name): """Save the output runcards.""" shutil.copy(runcard, f"{path}/runcard.yml") if self.platform is not None: from qibolab.paths import qibolab_folder platform_runcard = qibolab_folder / "runcards" / f"{platform_name}.yml" shutil.copy(platform_runcard, f"{path}/platform.yml") def save_meta(self, path, folder): import qibocal e = datetime.datetime.now(datetime.timezone.utc) meta = {} meta["title"] = folder meta["backend"] = str(self.backend) meta["platform"] = str(self.backend.platform) meta["date"] = e.strftime("%Y-%m-%d") meta["start-time"] = e.strftime("%H:%M:%S") meta["end-time"] = e.strftime("%H:%M:%S") meta["versions"] = self.backend.versions meta["versions"]["qibocal"] = qibocal.__version__ with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) def _build_single_action(self, name): """Helper method to parse the actions in the runcard.""" f = getattr(calibrations, name) path = os.path.join(self.folder, f"data/{name}/") os.makedirs(path) sig = inspect.signature(f) params = self.runcard["actions"][name] for param in list(sig.parameters)[2:-1]: if param not in params: raise_error(AttributeError, f"Missing parameter {param} in runcard.") if f.__annotations__["qubit"] == int: single_qubit_action = True else: single_qubit_action = False return f, params, path, single_qubit_action def execute(self): """Method to execute sequentially all the actions in the runcard.""" if self.platform is not None: self.platform.connect() self.platform.setup() self.platform.start() for action in self.runcard["actions"]: routine, args, path, single_qubit_action = self._build_single_action(action) self._execute_single_action(routine, args, path, single_qubit_action) if self.platform is not None: self.platform.stop() self.platform.disconnect() def _execute_single_action(self, routine, arguments, path, single_qubit): """Method to execute a single action and retrieving the results.""" if self.format is None: raise_error(ValueError, f"Cannot store data using {self.format} format.") if single_qubit: for qubit in self.qubits: results = routine(self.platform, qubit, **arguments) for data in results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: self.update_platform_runcard(qubit, routine.__name__) else: results = routine(self.platform, self.qubits, **arguments) for data in 
results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: self.update_platform_runcard(qubit, routine.__name__) def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data( self.folder, routine, self.format, f"fit_q{qubit}" ) except: data_fit = Data() params = [i for i in list(data_fit.df.keys()) if "fit" not in i] settings = load_yaml(f"{self.folder}/platform.yml") for param in params: settings["characterization"]["single_qubit"][qubit][param] = int( data_fit.df[param][0] ) with open(f"{self.folder}/data/{routine}/platform.yml", "a+") as file: yaml.dump( settings, file, sort_keys=False, indent=4, default_flow_style=None ) def dump_report(self): from qibocal.web.report import create_report # update end time meta = load_yaml(f"{self.folder}/meta.yml") e = datetime.datetime.now(datetime.timezone.utc) meta["end-time"] = e.strftime("%H:%M:%S") with open(f"{self.folder}/meta.yml", "w") as file: yaml.dump(meta, file) create_report(self.folder) class ReportBuilder: """Parses routines and plots to report and live plotting page. Args: path (str): Path to the data folder to generate report for. """ def __init__(self, path): self.path = path self.metadata = load_yaml(os.path.join(path, "meta.yml")) # find proper path title base, self.title = os.path.join(os.getcwd(), path), "" while self.title in ("", "."): base, self.title = os.path.split(base) self.runcard = load_yaml(os.path.join(path, "runcard.yml")) self.format = self.runcard.get("format") self.qubits = self.runcard.get("qubits") # create calibration routine objects # (could be incorporated to :meth:`qibocal.cli.builders.ActionBuilder._build_single_action`) self.routines = [] for action in self.runcard.get("actions"): if hasattr(calibrations, action): routine = getattr(calibrations, action) else: raise_error(ValueError, f"Undefined action {action} in report.") if not hasattr(routine, "plots"): routine.plots = [] self.routines.append(routine) def get_routine_name(self, routine): """Prettify routine's name for report headers.""" return routine.__name__.replace("_", " ").title() def get_figure(self, routine, method, qubit): """Get html figure for report. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ import tempfile figure = method(self.path, routine.__name__, qubit, self.format) with tempfile.NamedTemporaryFile() as temp: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") return fightml def get_live_figure(self, routine, method, qubit): """Get url to dash page for live plotting. This url is used by :meth:`qibocal.web.app.get_graph`. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. 
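        Returns:
            str: relative url obtained by joining the plot method name, the
            data path, the routine name, the qubit id and the data format,
            which the live-plotting dash app uses to locate the figure.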
""" return os.path.join( method.__name__, self.path, routine.__name__, str(qubit), self.format, ) src/qibocal/cli/_base.py METASEP # -*- coding: utf-8 -*- """Adds global CLI options.""" import base64 import pathlib import shutil import socket import subprocess import uuid from urllib.parse import urljoin import click from qibo.config import log, raise_error from qibocal.cli.builders import ActionBuilder CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # options for report upload UPLOAD_HOST = ( "qcvv@localhost" if socket.gethostname() == "saadiyat" else "[email protected]" ) TARGET_DIR = "qcvv-reports/" ROOT_URL = "http://login.qrccluster.com:9000/" @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("runcard", metavar="RUNCARD", type=click.Path(exists=True)) @click.option( "folder", "-o", type=click.Path(), help="Output folder. If not provided a standard name will generated.", ) @click.option( "force", "-f", is_flag=True, help="Use --force option to overwrite the output folder.", ) def command(runcard, folder, force=None): """qibocal: Quantum Calibration Verification and Validation using Qibo. Arguments: - RUNCARD: runcard with declarative inputs. """ builder = ActionBuilder(runcard, folder, force) builder.execute() builder.dump_report() @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "port", "-p", "--port", default=8050, type=int, help="Localhost port to launch dash server.", ) @click.option( "debug", "-d", "--debug", is_flag=True, help="Launch server in debugging mode.", ) def live_plot(port, debug): """Real time plotting of calibration data on a dash server.""" import socket from qibocal.web.app import app # change port if it is already used while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: if s.connect_ex(("localhost", port)) != 0: break port += 1 app.run_server(debug=debug, port=port) @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("output_folder", metavar="FOLDER", type=click.Path(exists=True)) def upload(output_folder): """Uploads output folder to server""" output_path = pathlib.Path(output_folder) # check the rsync command exists. if not shutil.which("rsync"): raise_error( RuntimeError, "Could not find the rsync command. Please make sure it is installed.", ) # check that we can authentica with a certificate ssh_command_line = ( "ssh", "-o", "PreferredAuthentications=publickey", "-q", UPLOAD_HOST, "exit", ) str_line = " ".join(repr(ele) for ele in ssh_command_line) log.info(f"Checking SSH connection to {UPLOAD_HOST}.") try: subprocess.run(ssh_command_line, check=True) except subprocess.CalledProcessError as e: raise RuntimeError( ( "Could not validate the SSH key. " "The command\n%s\nreturned a non zero exit status. " "Please make sure that your public SSH key is on the server." ) % str_line ) from e except OSError as e: raise RuntimeError( "Could not run the command\n{}\n: {}".format(str_line, e) ) from e log.info("Connection seems OK.") # upload output randname = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode() newdir = TARGET_DIR + randname rsync_command = ( "rsync", "-aLz", "--chmod=ug=rwx,o=rx", f"{output_path}/", f"{UPLOAD_HOST}:{newdir}", ) log.info(f"Uploading output ({output_path}) to {UPLOAD_HOST}") try: subprocess.run(rsync_command, check=True) except subprocess.CalledProcessError as e: msg = f"Failed to upload output: {e}" raise RuntimeError(msg) from e url = urljoin(ROOT_URL, randname) log.info(f"Upload completed. 
The result is available at:\n{url}") src/qibocal/cli/__init__.py METASEP # -*- coding: utf-8 -*- """CLI entry point.""" from ._base import command, live_plot, upload src/qibocal/calibrations/__init__.py METASEP # -*- coding: utf-8 -*- from qibocal.calibrations.characterization.allXY import * from qibocal.calibrations.characterization.calibrate_qubit_states import * from qibocal.calibrations.characterization.flipping import * from qibocal.calibrations.characterization.qubit_spectroscopy import * from qibocal.calibrations.characterization.rabi_oscillations import * from qibocal.calibrations.characterization.ramsey import * from qibocal.calibrations.characterization.resonator_spectroscopy import * from qibocal.calibrations.characterization.t1 import * from qibocal.calibrations.protocols.test import * serverscripts/qcvv-update-on-change.py METASEP #!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import curio import inotify.adapters import inotify.constants from curio import subprocess async def main(folder, exe_args): i = inotify.adapters.Inotify() i.add_watch(folder) for event in i.event_gen(yield_nones=False): if event is not None: (header, _, _, _) = event if ( (header.mask & inotify.constants.IN_CREATE) or (header.mask & inotify.constants.IN_DELETE) or (header.mask & inotify.constants.IN_MODIFY) ): await subprocess.run(exe_args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("folder") parser.add_argument("exe_args", nargs="+") args = parser.parse_args() curio.run(main(args.folder, args.exe_args)) serverscripts/qcvv-index.reports.py METASEP # -*- coding: utf-8 -*- """qcvv-index-reports.py Generates a JSON index with reports information. """ import json import pathlib import sys from collections import ChainMap import yaml ROOT = "/home/users/qcvv/qcvv-reports" ROOT_URL = "http://login.qrccluster.com:9000/" OUT = "/home/users/qcvv/qcvv-reports/index.json" DEFAULTS = { "title": "-", "date": "-", "platform": "-", "start-time": "-", "end-time": "-", } REQUIRED_FILE_METADATA = {"title", "date", "platform", "start-time" "end-time"} def meta_from_path(p): meta = ChainMap(DEFAULTS) yaml_meta = p / "meta.yml" yaml_res = {} if yaml_meta.exists(): with yaml_meta.open() as f: try: yaml_res = yaml.safe_load(f) except yaml.YAMLError as e: print(f"Error processing {yaml_meta}: {e}", file=sys.stderr) meta = meta.new_child(yaml_res) return meta def register(p): path_meta = meta_from_path(p) title, date, platform, start_time, end_time = ( path_meta["title"], path_meta["date"], path_meta["platform"], path_meta["start-time"], path_meta["end-time"], ) url = ROOT_URL + p.name titlelink = f'<a href="{url}">{title}</a>' return (titlelink, date, platform, start_time, end_time) def make_index(): root_path = pathlib.Path(ROOT) data = [] for p in root_path.iterdir(): if p.is_dir(): try: res = register(p) data.append(res) except: print("Error processing folder", p, file=sys.stderr) raise with open(OUT, "w") as f: json.dump({"data": data}, f) if __name__ == "__main__": make_index() src/qibocal/decorators.py METASEP # -*- coding: utf-8 -*- """Decorators implementation.""" import os from qibocal.config import raise_error def plot(header, method): """Decorator for adding plots in the report and live plotting page. Args: header (str): Header of the plot to use in the report. method (Callable): Plotting method defined under ``qibocal.plots``. 
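    Returns:
        Callable: decorator that prepends the ``(header, method)`` pair to the
        routine's ``plots`` attribute, keeping the plot order used in the report.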
""" def wrapped(f): if hasattr(f, "plots"): # insert in the beginning of the list to have # proper plot ordering in the report f.plots.insert(0, (header, method)) else: f.plots = [(header, method)] return f return wrapped src/qibocal/data.py METASEP # -*- coding: utf-8 -*- """Implementation of Dataset class to store measurements.""" import re from abc import abstractmethod import pandas as pd import pint_pandas from qibocal.config import raise_error class AbstractDataset: def __init__(self, name=None): if name is None: self.name = "data" else: self.name = name self.df = pd.DataFrame() def __add__(self, data): self.df = pd.concat([self.df, data.df], ignore_index=True) return self @abstractmethod def add(self, data): raise_error(NotImplementedError) def __len__(self): """Computes the length of the dataset.""" return len(self.df) @abstractmethod def load_data(cls, folder, routine, format, name): raise_error(NotImplementedError) @abstractmethod def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" if self.quantities == None: self.df.to_csv(f"{path}/{self.name}.csv") else: self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") class Dataset(AbstractDataset): """Class to store the data measured during the calibration routines. It is a wrapper to a pandas DataFrame with units of measure from the Pint library. Args: quantities (dict): dictionary containing additional quantities that the user may save other than the pulse sequence output. The keys are the name of the quantities and the corresponding values are the units of measure. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) self.df = pd.DataFrame( { "MSR": pd.Series(dtype="pint[V]"), "i": pd.Series(dtype="pint[V]"), "q": pd.Series(dtype="pint[V]"), "phase": pd.Series(dtype="pint[deg]"), } ) self.quantities = {"MSR": "V", "i": "V", "q": "V", "phase": "deg"} if quantities is not None: self.quantities.update(quantities) for name, unit in quantities.items(): self.df.insert(0, name, pd.Series(dtype=f"pint[{unit}]")) from pint import UnitRegistry self.ureg = UnitRegistry() def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) # TODO: find a better way to do this self.df.loc[l, name] = value * self.ureg(unit) def get_values(self, quantity, unit): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. unit (str): Unit of the returned values. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity].pint.to(unit).pint.magnitude @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. 
""" obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file, header=[0, 1]) obj.df = obj.df.pint.quantify(level=-1) obj.df.pop("Unnamed: 0_level_0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") class Data(AbstractDataset): """Class to store the data obtained from calibration routines. It is a wrapper to a pandas DataFrame. Args: quantities (dict): dictionary quantities to be saved. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) if quantities is not None: self.quantities = quantities for name in quantities: self.df.insert(0, name, pd.Series(dtype=object)) def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): self.df.loc[l, key] = value def get_values(self, quantity): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity] @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file) obj.df.pop("Unnamed: 0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") src/qibocal/config.py METASEP # -*- coding: utf-8 -*- """Custom logger implemenation.""" import logging import os # Logging level from 0 (all) to 4 (errors) (see https://docs.python.org/3/library/logging.html#logging-levels) QIBOCAL_LOG_LEVEL = 1 if "QIBOCAL_LOG_LEVEL" in os.environ: # pragma: no cover QIBOCAL_LOG_LEVEL = 10 * int(os.environ.get("QIBOCAL_LOG_LEVEL")) def raise_error(exception, message=None, args=None): """Raise exception with logging error. Args: exception (Exception): python exception. message (str): the error message. 
""" log.error(message) if args: raise exception(message, args) else: raise exception(message) # Configuration for logging mechanism class CustomHandler(logging.StreamHandler): """Custom handler for logging algorithm.""" def format(self, record): """Format the record with specific format.""" from qibocal import __version__ fmt = f"[Qibocal {__version__}|%(levelname)s|%(asctime)s]: %(message)s" return logging.Formatter(fmt, datefmt="%Y-%m-%d %H:%M:%S").format(record) # allocate logger object log = logging.getLogger(__name__) log.setLevel(QIBOCAL_LOG_LEVEL) log.addHandler(CustomHandler()) src/qibocal/__init__.py METASEP # -*- coding: utf-8 -*- from .cli import command, live_plot, upload """qibocal: Quantum Calibration Verification and Validation using Qibo.""" import importlib.metadata as im __version__ = im.version(__package__) src/qcvv/fitting/methods.py METASEP
np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n 
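

# For reference only: a sketch of the damped-oscillation model that rabi() (imported
# from qibocal.fitting.utils) is assumed to follow. The guesses in rabi_fit treat p2
# as an oscillation frequency (pi-pulse duration = 1 / (2 * p2)) and p4 as a decay
# rate (t1 = 1 / p4). The exact expression lives in utils and may differ in detail;
# this helper is illustrative and is not called anywhere in this module.
def _rabi_reference(x, p0, p1, p2, p3, p4):
    return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4)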


def ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):
    """Fit Ramsey oscillations and extract the corrected qubit frequency and T2."""
    data_fit = Data(
        name=f"fit_q{qubit}",
        quantities=[
            "popt0",
            "popt1",
            "popt2",
            "popt3",
            "popt4",
            labels[0],
            labels[1],
            labels[2],
        ],
    )

    time = data.get_values(*parse(x))
    voltages = data.get_values(*parse(y))

    pguess = [
        np.mean(voltages.values),
        np.max(voltages.values) - np.min(voltages.values),
        0.5 / time.values[np.argmin(voltages.values)],
        np.pi / 2,
        500e-9,
    ]

    try:
        popt, pcov = curve_fit(
            ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000
        )
        delta_fitting = popt[2]
        delta_phys = int((delta_fitting * sampling_rate) - offset_freq)
        corrected_qubit_frequency = int(qubit_freq - delta_phys)
        t2 = 1.0 / popt[4]
    except:
        log.warning("The fitting was not successful")
        return data_fit

    data_fit.add(
        {
            "popt0": popt[0],
            "popt1": popt[1],
            "popt2": popt[2],
            "popt3": popt[3],
            "popt4": popt[4],
            labels[0]: delta_phys,
            labels[1]: corrected_qubit_frequency,
            labels[2]: t2,
        }
    )
    return data_fit


def t1_fit(data, x, y, qubit, nqubits, labels):
    """Fit an exponential decay and extract T1."""
    data_fit = Data(
        name=f"fit_q{qubit}",
        quantities=[
            "popt0",
            "popt1",
            "popt2",
            labels[0],
        ],
    )

    time = data.get_values(*parse(x))
    voltages = data.get_values(*parse(y))

    if nqubits == 1:
        pguess = [
            max(voltages.values),
            (max(voltages.values) - min(voltages.values)),
            1 / 250,
        ]
    else:
        pguess = [
            min(voltages.values),
            (max(voltages.values) - min(voltages.values)),
            1 / 250,
        ]

    try:
        popt, pcov = curve_fit(
            exp, time.values, voltages.values, p0=pguess, maxfev=2000000
        )
        t1 = abs(1 / popt[2])

    except:
        log.warning("The fitting was not successful")
        return data_fit

    data_fit.add(
        {
            "popt0": popt[0],
            "popt1": popt[1],
            "popt2": popt[2],
            labels[0]: t1,
        }
    )
    return data_fit


def flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels):
    """Fit the flipping sequence and extract the corrected pi-pulse amplitude."""
    data_fit = Data(
        name=f"fit_q{qubit}",
        quantities=[
            "popt0",
            "popt1",
            "popt2",
            "popt3",
            labels[0],
            labels[1],
        ],
    )

    flips = data.get_values(*parse(x))  # Check what the X data stores: N flips or i?
    voltages = data.get_values(*parse(y))

    if nqubits == 1:
        pguess = [0.0003, np.mean(voltages), -18, 0]  # epsilon guess parameter
    else:
        pguess = [0.0003, np.mean(voltages), 18, 0]  # epsilon guess parameter

    try:
        popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000)
        epsilon = -np.pi / popt[2]
        amplitude_delta = np.pi / (np.pi + epsilon)
        corrected_amplitude = amplitude_delta * pi_pulse_amplitude
        # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter)
        # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude
    except:
        log.warning("The fitting was not successful")
        return data_fit

    data_fit.add(
        {
            "popt0": popt[0],
            "popt1": popt[1],
            "popt2": popt[2],
            "popt3": popt[3],
            labels[0]: amplitude_delta,
            labels[1]: corrected_amplitude,
        }
    )
    return data_fit
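

# Illustrative sketch (not used above): the amplitude correction computed in
# flipping_fit. popt2 is the fitted period-like parameter of the flipping curve;
# the resulting scaling factor is applied to the current pi-pulse amplitude.
def _flipping_amplitude_correction(popt2, pi_pulse_amplitude):
    """Return (amplitude_delta, corrected_amplitude) as computed in flipping_fit."""
    epsilon = -np.pi / popt2
    amplitude_delta = np.pi / (np.pi + epsilon)
    corrected_amplitude = amplitude_delta * pi_pulse_amplitude
    return amplitude_delta, corrected_amplitude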


def drag_tunning_fit(data, x, y, qubit, nqubits, labels):
    """Fit a cosine to the DRAG beta-parameter scan."""
    data_fit = Data(
        name=f"fit_q{qubit}",
        quantities=[
            "popt0",
            "popt1",
            "popt2",
            "popt3",
            labels[0],
        ],
    )

    beta_params = data.get_values(*parse(x))
    voltages = data.get_values(*parse(y))

    pguess = [
        0,  # Offset:    p[0]
        beta_params.values[np.argmax(voltages)]
        - beta_params.values[np.argmin(voltages)],  # Amplitude: p[1]
        4,  # Period:    p[2]
        0.3,  # Phase:     p[3]
    ]

    try:
        popt, pcov = curve_fit(cos, beta_params.values, voltages.values)
\"popt2\",\n labels[0],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n max(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n else:\n pguess = [\n min(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n\n try:\n popt, pcov = curve_fit(\n exp, time.values, voltages.values, p0=pguess, maxfev=2000000\n )", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n 
\"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 500e-9,\n ]\n\n try:\n popt, pcov = curve_fit(\n ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n delta_fitting = popt[2]\n delta_phys = int((delta_fitting * sampling_rate) - offset_freq)\n corrected_qubit_frequency = int(qubit_freq - delta_phys)\n t2 = 1.0 / popt[4]\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: delta_phys,\n labels[1]: corrected_qubit_frequency,\n labels[2]: t2,\n }\n )\n return data_fit\n\n\ndef t1_fit(data, x, y, qubit, nqubits, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n labels[0],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n max(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n else:\n pguess = [\n min(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n\n try:\n popt, pcov = curve_fit(\n exp, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n t1 = abs(1 / popt[2])\n\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n labels[0]: t1,\n }\n )\n return data_fit\n\n\ndef flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n labels[0],\n labels[1],\n ],\n 
)\n\n flips = data.get_values(*parse(x)) # Check X data stores. N flips or i?\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter\n else:\n pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter\n\n try:\n popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000)\n epsilon = -np.pi / popt[2]\n amplitude_delta = np.pi / (np.pi + epsilon)\n corrected_amplitude = amplitude_delta * pi_pulse_amplitude\n # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter)\n # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n labels[0]: amplitude_delta,\n labels[1]: corrected_amplitude,\n }\n )\n return data_fit\n", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n 
np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 500e-9,\n ]\n\n try:\n popt, pcov = curve_fit(\n ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n delta_fitting = popt[2]\n delta_phys = int((delta_fitting * sampling_rate) - 
offset_freq)\n corrected_qubit_frequency = int(qubit_freq - delta_phys)\n t2 = 1.0 / popt[4]\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: delta_phys,\n labels[1]: corrected_qubit_frequency,\n labels[2]: t2,\n }\n )\n return data_fit\n\n\ndef t1_fit(data, x, y, qubit, nqubits, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n labels[0],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n max(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n else:\n pguess = [\n min(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n\n try:\n popt, pcov = curve_fit(\n exp, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n t1 = abs(1 / popt[2])\n\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n labels[0]: t1,\n }\n )\n return data_fit\n\n\ndef flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n labels[0],\n labels[1],\n ],", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n 
model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 500e-9,\n ]\n\n try:\n popt, pcov = curve_fit(\n ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n delta_fitting = popt[2]\n delta_phys = int((delta_fitting * sampling_rate) - offset_freq)\n corrected_qubit_frequency = int(qubit_freq - delta_phys)\n t2 = 1.0 / popt[4]\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: delta_phys,\n labels[1]: corrected_qubit_frequency,\n labels[2]: t2,\n }\n )\n return data_fit\n\n\ndef t1_fit(data, x, y, qubit, 
nqubits, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n labels[0],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n max(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n else:\n pguess = [\n min(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n\n try:\n popt, pcov = curve_fit(\n exp, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n t1 = abs(1 / popt[2])\n\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n labels[0]: t1,\n }\n )\n return data_fit\n\n\ndef flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n labels[0],\n labels[1],\n ],\n )\n\n flips = data.get_values(*parse(x)) # Check X data stores. N flips or i?\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n 
try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 500e-9,\n ]\n\n try:\n popt, pcov = curve_fit(\n ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n delta_fitting = popt[2]\n delta_phys = int((delta_fitting * sampling_rate) - offset_freq)", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name 
+ f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": 
popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 500e-9,\n ]\n\n try:\n popt, pcov = curve_fit(\n ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n delta_fitting = popt[2]\n delta_phys = int((delta_fitting * sampling_rate) - offset_freq)\n corrected_qubit_frequency = int(qubit_freq - delta_phys)\n t2 = 1.0 / popt[4]\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: delta_phys,\n labels[1]: corrected_qubit_frequency,\n labels[2]: t2,\n }\n )\n return data_fit\n\n\ndef t1_fit(data, x, y, qubit, nqubits, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n labels[0],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n max(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n else:\n pguess = [\n min(voltages.values),\n (max(voltages.values) - min(voltages.values)),\n 1 / 250,\n ]\n\n try:\n popt, pcov = curve_fit(\n exp, time.values, voltages.values, p0=pguess, maxfev=2000000\n )\n t1 = abs(1 / popt[2])\n\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n labels[0]: t1,\n }\n )\n return data_fit\n\n\ndef flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n labels[0],\n labels[1],\n ],\n )\n\n flips = data.get_values(*parse(x)) # Check X data stores. 
N flips or i?\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter\n else:\n pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter\n\n try:\n popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000)\n epsilon = -np.pi / popt[2]\n amplitude_delta = np.pi / (np.pi + epsilon)\n corrected_amplitude = amplitude_delta * pi_pulse_amplitude\n # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter)\n # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n labels[0]: amplitude_delta,\n labels[1]: corrected_amplitude,\n }\n )\n return data_fit\n\n\ndef drag_tunning_fit(data, x, y, qubit, nqubits, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n labels[0],\n ],\n )\n\n beta_params = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n pguess = [\n 0, # Offset: p[0]\n beta_params.values[np.argmax(voltages)]\n - beta_params.values[np.argmin(voltages)], # Amplitude: p[1]\n 4, # Period: p[2]\n 0.3, # Phase: p[3]\n ]\n\n try:\n popt, pcov = curve_fit(cos, beta_params.values, voltages.values)\n smooth_dataset = cos(beta_params.values, popt[0], popt[1], popt[2], popt[3])\n beta_optimal = beta_params.values[np.argmin(smooth_dataset)]\n\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, 
exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / (fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n try:\n 
popt, pcov = curve_fit(\n rabi, time.values, voltages.values, p0=pguess, maxfev=10000\n )\n smooth_dataset = rabi(time.values, *popt)\n pi_pulse_duration = np.abs((1.0 / popt[2]) / 2)\n rabi_oscillations_pi_pulse_max_voltage = smooth_dataset.max() * 1e6\n t1 = 1.0 / popt[4] # double check T1\n except:\n log.warning(\"The fitting was not succesful\")\n return data_fit\n\n data_fit.add(\n {\n \"popt0\": popt[0],\n \"popt1\": popt[1],\n \"popt2\": popt[2],\n \"popt3\": popt[3],\n \"popt4\": popt[4],\n labels[0]: pi_pulse_duration,\n labels[1]: rabi_oscillations_pi_pulse_max_voltage,\n labels[2]: t1,\n }\n )\n return data_fit\n\n\ndef ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels):\n\n data_fit = Data(\n name=f\"fit_q{qubit}\",", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Routine-specific method for post-processing data acquired.\"\"\"\nimport lmfit\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom qibocal.config import log\nfrom qibocal.data import Data\nfrom qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey\n\n\ndef lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None):\n \"\"\"Fitting routine for resonator spectroscopy\"\"\"\n if fit_file_name == None:\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n else:\n data_fit = Data(\n name=fit_file_name + f\"_q{qubit}\",\n quantities=[\n \"fit_amplitude\",\n \"fit_center\",\n \"fit_sigma\",\n \"fit_offset\",\n labels[1],\n labels[0],\n ],\n )\n\n frequencies = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n # Create a lmfit model for fitting equation defined in resonator_peak\n model_Q = lmfit.Model(lorenzian)\n\n # Guess parameters for Lorentzian max or min\n if (nqubits == 1 and labels[0] == \"resonator_freq\") or (\n nqubits != 1 and labels[0] == \"qubit_freq\"\n ):\n guess_center = frequencies[\n np.argmax(voltages)\n ] # Argmax = Returns the indices of the maximum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center)\n guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi\n\n else:\n guess_center = frequencies[\n np.argmin(voltages)\n ] # Argmin = Returns the indices of the minimum values along an axis.\n guess_offset = np.mean(\n voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))]\n )\n guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center)\n guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi\n\n # Add guessed parameters to the model\n model_Q.set_param_hint(\"center\", value=guess_center, vary=True)\n model_Q.set_param_hint(\"sigma\", value=guess_sigma, vary=True)\n model_Q.set_param_hint(\"amplitude\", value=guess_amp, vary=True)\n model_Q.set_param_hint(\"offset\", value=guess_offset, vary=True)\n guess_parameters = model_Q.make_params()\n\n # fit the model with the data and guessed parameters\n try:\n fit_res = model_Q.fit(\n data=voltages, frequency=frequencies, params=guess_parameters\n )\n except:\n log.warning(\"The fitting was not successful\")\n return data_fit\n\n # get the values for postprocessing and for legend.\n f0 = fit_res.best_values[\"center\"]\n BW = fit_res.best_values[\"sigma\"] * 2\n Q = abs(f0 / BW)\n peak_voltage = (\n fit_res.best_values[\"amplitude\"] / 
(fit_res.best_values[\"sigma\"] * np.pi)\n + fit_res.best_values[\"offset\"]\n )\n\n freq = f0 * 1e6\n\n data_fit.add(\n {\n labels[1]: peak_voltage,\n labels[0]: freq,\n \"fit_amplitude\": fit_res.best_values[\"amplitude\"],\n \"fit_center\": fit_res.best_values[\"center\"],\n \"fit_sigma\": fit_res.best_values[\"sigma\"],\n \"fit_offset\": fit_res.best_values[\"offset\"],\n }\n )\n return data_fit\n\n\ndef rabi_fit(data, x, y, qubit, nqubits, labels):\n data_fit = Data(\n name=f\"fit_q{qubit}\",\n quantities=[\n \"popt0\",\n \"popt1\",\n \"popt2\",\n \"popt3\",\n \"popt4\",\n labels[0],\n labels[1],\n labels[2],\n ],\n )\n\n time = data.get_values(*parse(x))\n voltages = data.get_values(*parse(y))\n\n if nqubits == 1:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmin(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]\n else:\n pguess = [\n np.mean(voltages.values),\n np.max(voltages.values) - np.min(voltages.values),\n 0.5 / time.values[np.argmax(voltages.values)],\n np.pi / 2,\n 0.1e-6,\n ]", "type": "random" } ]
[ " data_fit.add(", " labels[0]: t1,", " data_fit = Data(", " rabi, time.values, voltages.values, p0=pguess, maxfev=10000", " smooth_dataset = rabi(time.values, *popt)", " voltages = data.get_values(*parse(y))", " labels[2]: t1,", " frequencies = data.get_values(*parse(x))", " popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000)", " model_Q = lmfit.Model(lorenzian)", " time = data.get_values(*parse(x))", " ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000", " beta_params = data.get_values(*parse(x))", " popt, pcov = curve_fit(cos, beta_params.values, voltages.values)", " smooth_dataset = cos(beta_params.values, popt[0], popt[1], popt[2], popt[3])", " exp, time.values, voltages.values, p0=pguess, maxfev=2000000", " t1 = abs(1 / popt[2])", "", " # Create a lmfit model for fitting equation defined in resonator_peak", " )", " pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter", " corrected_qubit_frequency = int(qubit_freq - delta_phys)", " {", " else:", " quantities=[", " try:" ]
METASEP
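The fitting entries captured in the preceding record repeatedly build lmfit models around a `lorenzian` function imported from qibocal.fitting.utils, but that helper's definition is not part of this excerpt. The sketch below is a minimal, hypothetical reconstruction assuming the standard Lorentzian parameterization implied by the captured post-processing (peak_voltage = amplitude / (sigma * np.pi) + offset at the line center); the function name and signature come from the imports shown above, everything else is an assumption, and the real qibocal implementation may differ. The trailing comment also records the parenthesization that the guess_offset expressions above most plausibly intend.

# Minimal sketch (assumption): a Lorentzian line shape consistent with the
# peak_voltage = amplitude / (sigma * np.pi) + offset expression used by
# lorentzian_fit in the record above. Illustrative only, not the shipped
# qibocal.fitting.utils code.
import numpy as np


def lorenzian(frequency, amplitude, center, sigma, offset):
    # At frequency == center this evaluates to amplitude / (np.pi * sigma) + offset,
    # which matches how the captured routine recovers the peak voltage.
    return (amplitude / np.pi) * (sigma / ((frequency - center) ** 2 + sigma**2)) + offset


# Hypothetical usage, mirroring how lorentzian_fit wires the model:
#   model_Q = lmfit.Model(lorenzian)
#   model_Q.set_param_hint("center", value=guess_center, vary=True)

# Note on the captured guess_offset lines: as written they take
# np.abs(voltages - np.mean(voltages) < np.std(voltages)), i.e. the absolute
# value of a boolean comparison; the mask presumably intended is
#   np.abs(voltages - np.mean(voltages)) < np.std(voltages)
# (keep samples within one standard deviation of the mean).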
16
qiboteam__qibocal
qiboteam__qibocal METASEP doc/source/conf.py METASEP # -*- coding: utf-8 -*- # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.transform import AutoStructify sys.path.insert(0, os.path.abspath("..")) import qcvv # -- Project information ----------------------------------------------------- project = "qcvv" copyright = "2022, The Qibo team" author = "The Qibo team" # The full version, including alpha/beta/rc tags release = qcvv.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Markdown configuration # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"} autosectionlabel_prefix_document = True # Allow to embed rst syntax in markdown files. enable_eval_rst = True # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = [] # -- Intersphinx ------------------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Autodoc ------------------------------------------------------------------ # autodoc_member_order = "bysource" # Adapted this from # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py # app setup hook def setup(app): app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True) app.add_transform(AutoStructify) serverscripts/qcvv-update-on-change.py METASEP #!/usr/bin/env python # -*- coding: utf-8 -*- import argparse import curio import inotify.adapters import inotify.constants from curio import subprocess async def main(folder, exe_args): i = inotify.adapters.Inotify() i.add_watch(folder) for event in i.event_gen(yield_nones=False): if event is not None: (header, _, _, _) = event if ( (header.mask & inotify.constants.IN_CREATE) or (header.mask & inotify.constants.IN_DELETE) or (header.mask & inotify.constants.IN_MODIFY) ): await subprocess.run(exe_args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("folder") parser.add_argument("exe_args", nargs="+") args = parser.parse_args() curio.run(main(args.folder, args.exe_args)) serverscripts/qcvv-index.reports.py METASEP # -*- coding: utf-8 -*- """qcvv-index-reports.py Generates a JSON index with reports information. """ import json import pathlib import sys from collections import ChainMap import yaml ROOT = "/home/users/qcvv/qcvv-reports" ROOT_URL = "http://login.qrccluster.com:9000/" OUT = "/home/users/qcvv/qcvv-reports/index.json" DEFAULTS = { "title": "-", "date": "-", "platform": "-", "start-time": "-", "end-time": "-", } REQUIRED_FILE_METADATA = {"title", "date", "platform", "start-time" "end-time"} def meta_from_path(p): meta = ChainMap(DEFAULTS) yaml_meta = p / "meta.yml" yaml_res = {} if yaml_meta.exists(): with yaml_meta.open() as f: try: yaml_res = yaml.safe_load(f) except yaml.YAMLError as e: print(f"Error processing {yaml_meta}: {e}", file=sys.stderr) meta = meta.new_child(yaml_res) return meta def register(p): path_meta = meta_from_path(p) title, date, platform, start_time, end_time = ( path_meta["title"], path_meta["date"], path_meta["platform"], path_meta["start-time"], path_meta["end-time"], ) url = ROOT_URL + p.name titlelink = f'<a href="{url}">{title}</a>' return (titlelink, date, platform, start_time, end_time) def make_index(): root_path = pathlib.Path(ROOT) data = [] for p in root_path.iterdir(): if p.is_dir(): try: res = register(p) data.append(res) except: print("Error processing folder", p, file=sys.stderr) raise with open(OUT, "w") as f: json.dump({"data": data}, f) if __name__ == "__main__": make_index() src/qcvv/web/server.py METASEP # -*- coding: utf-8 -*- import os import pathlib import yaml from flask import Flask, render_template from qcvv import __version__ from qcvv.cli.builders import ReportBuilder server = Flask(__name__) @server.route("/") @server.route("/data/<path>") def page(path=None): folders = [ folder for folder in reversed(sorted(os.listdir(os.getcwd()))) if os.path.isdir(folder) and "meta.yml" in os.listdir(folder) ] report = None if path is not None: try: report = ReportBuilder(path) except (FileNotFoundError, TypeError): pass return render_template( "template.html", version=__version__, folders=folders, report=report, ) src/qcvv/web/report.py METASEP # -*- coding: utf-8 -*- import os import pathlib from jinja2 import 
Environment, FileSystemLoader from qcvv import __version__ from qcvv.cli.builders import ReportBuilder def create_report(path): """Creates an HTML report for the data in the given path.""" filepath = pathlib.Path(__file__) with open(os.path.join(filepath.with_name("static"), "styles.css"), "r") as file: css_styles = f"<style>\n{file.read()}\n</style>" report = ReportBuilder(path) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") html = template.render( is_static=True, css_styles=css_styles, version=__version__, report=report, ) with open(os.path.join(path, "index.html"), "w") as file: file.write(html) src/qcvv/web/app.py METASEP # -*- coding: utf-8 -*- import os import pandas as pd import yaml from dash import Dash, Input, Output, dcc, html from qcvv import plots from qcvv.data import Dataset from qcvv.web.server import server Dataset() # dummy dataset call to suppress ``pint[V]`` error app = Dash( server=server, suppress_callback_exceptions=True, ) app.layout = html.Div( [ dcc.Location(id="url", refresh=False), dcc.Graph(id="graph", figure={}), dcc.Interval( id="interval", # TODO: Perhaps the user should be allowed to change the refresh rate interval=1000, n_intervals=0, disabled=False, ), ] ) @app.callback( Output("graph", "figure"), Input("interval", "n_intervals"), Input("graph", "figure"), Input("url", "pathname"), ) def get_graph(n, current_figure, url): method, folder, routine, qubit, format = url.split(os.sep)[2:] try: # data = Dataset.load_data(folder, routine, format, "precision_sweep") # with open(f"{folder}/platform.yml", "r") as f: # nqubits = yaml.safe_load(f)["nqubits"] # if len(data) > 2: # params, fit = resonator_spectroscopy_fit(folder, format, nqubits) # else: # params, fit = None, None # return getattr(plots.resonator_spectroscopy, method)(data, params, fit) # # FIXME: Temporarily hardcode the plotting method to test # # multiple routines with different names in one folder # # should be changed to: # # return getattr(getattr(plots, routine), method)(data) return getattr(plots, method)(folder, routine, qubit, format) except (FileNotFoundError, pd.errors.EmptyDataError): return current_figure src/qcvv/web/__init__.py METASEP src/qcvv/tests/test_data.py METASEP # -*- coding: utf-8 -*- """Some tests for the Dataset class""" import tempfile import numpy as np import pytest from pint import DimensionalityError, UndefinedUnitError from qcvv.data import Dataset def random_dataset(length): data = Dataset() for _ in range(length): msr, i, q, phase = np.random.rand(len(data.df.columns)) data.add({"MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) return data def test_data_initialization(): """Test Dataset constructor""" data = Dataset() assert len(data.df.columns) == 4 assert list(data.df.columns) == ["MSR", "i", "q", "phase"] data1 = Dataset(quantities={"attenuation": "dB"}) assert len(data1.df.columns) == 5 assert list(data1.df.columns) == ["attenuation", "MSR", "i", "q", "phase"] def test_units(): """Test units of measure in Dataset""" data = Dataset() assert data.df.MSR.values.units == "volt" data1 = Dataset(quantities={"frequency": "Hz"}) assert data1.df.frequency.values.units == "hertz" with pytest.raises(UndefinedUnitError): data2 = Dataset(quantities={"fake_unit": "fake"}) def test_add(): """Test add method of Dataset""" data = random_dataset(5) assert len(data) == 5 data1 = Dataset(quantities={"attenuation": "dB"}) msr, i, q, phase, att = np.random.rand(len(data1.df.columns)) data1.add( { 
"MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "attenuation[dB]": att, } ) assert len(data1) == 1 data1.add( { "MSR[V]": 0, "i[V]": 0.0, "q[V]": 0.0, "phase[deg]": 0, "attenuation[dB]": 1, } ) assert len(data1) == 2 data2 = Dataset() msr, i, q, phase = np.random.rand(len(data2.df.columns)) with pytest.raises(DimensionalityError): data2.add({"MSR[dB]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) with pytest.raises(UndefinedUnitError): data2.add({"MSR[test]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) src/qcvv/plots/scatters.py METASEP # -*- coding: utf-8 -*- import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qcvv.data import Data, Dataset from qcvv.fitting.utils import lorenzian def frequency_msr_phase__fast_precision(folder, routine, qubit, format): try: data_fast = Dataset.load_data(folder, routine, format, f"fast_sweep_q{qubit}") except: data_fast = Dataset(quantities={"frequency": "Hz"}) try: data_precision = Dataset.load_data( folder, routine, format, f"precision_sweep_q{qubit}" ) except: data_precision = Dataset(quantities={"frequency": "Hz"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("MSR", "uV"), name="Fast", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("phase", "rad"), name="Fast", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("MSR", "uV"), name="Precision", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("phase", "rad"), name="Precision", ), row=1, col=2, ) if len(data_fast) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_fast.get_values("frequency", "GHz")), max(data_fast.get_values("frequency", "GHz")), 20, ) params = [i for i in list(data_fit.df.keys()) if "fit" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.df["fit_amplitude"][0], data_fit.df["fit_center"][0], data_fit.df["fit_sigma"][0], data_fit.df["fit_offset"][0], ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig def frequency_attenuation_msr_phase__cut(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot fig = go.Figure() # index data on a specific attenuation value smalldf = 
data.df[data.get_values("attenuation", "dB") == plot1d_attenuation].copy() # split multiple software averages to different datasets datasets = [] while len(smalldf): datasets.append(smalldf.drop_duplicates("frequency")) smalldf.drop(datasets[-1].index, inplace=True) fig.add_trace( go.Scatter( x=datasets[-1]["frequency"].pint.to("GHz").pint.magnitude, y=datasets[-1]["MSR"].pint.to("V").pint.magnitude, ), ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting, xaxis_title="Frequency (GHz)", yaxis_title="MSR (V)", ) return fig src/qcvv/plots/heatmaps.py METASEP # -*- coding: utf-8 -*- import os.path import plotly.graph_objects as go from plotly.subplots import make_subplots from qcvv.data import Dataset def frequency_flux_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Current (A)", xaxis2_title="Frequency (GHz)", yaxis2_title="Current (A)", ) return fig def frequency_attenuation_msr_phase(folder, routine, qubit, format): data = Dataset.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Attenuation (dB)", xaxis2_title="Frequency (GHz)", yaxis2_title="Attenuation (dB)", ) return fig def frequency_flux_msr_phase__matrix(folder, routine, qubit, format): fluxes = [] for i in range(25): # FIXME: 25 is hardcoded file = f"{folder}/data/{routine}/data_q{qubit}_f{i}.csv" if os.path.exists(file): fluxes += [i] if len(fluxes) < 1: nb = 1 else: nb = len(fluxes) fig = make_subplots( rows=2, cols=nb, horizontal_spacing=0.1, vertical_spacing=0.1, x_title="Frequency (Hz)", y_title="Current (A)", shared_xaxes=True, shared_yaxes=True, ) for j in fluxes: if j == fluxes[-1]: showscale = True else: showscale = False data = Dataset.load_data(folder, routine, format, f"data_q{qubit}_f{j}") fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), showscale=showscale, ), row=1, col=j, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), showscale=showscale, ), row=2, col=j, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting ) return fig src/qcvv/plots/__init__.py 
METASEP # -*- coding: utf-8 -*- from qcvv.plots.heatmaps import * from qcvv.plots.scatters import * src/qcvv/fitting/utils.py METASEP # -*- coding: utf-8 -*- import re import numpy as np def lorenzian(frequency, amplitude, center, sigma, offset): # http://openafox.com/science/peak-function-derivations.html return (amplitude / np.pi) * ( sigma / ((frequency - center) ** 2 + sigma**2) ) + offset def parse(key): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) return name, unit src/qcvv/fitting/methods.py METASEP # -*- coding: utf-8 -*- """Routine-specific method for post-processing data acquired.""" import lmfit import numpy as np from qcvv.config import log from qcvv.data import Data from qcvv.fitting.utils import lorenzian, parse def lorentzian_fit(data, x, y, qubit, nqubits, labels): """Fitting routine for resonator spectroscopy""" data_fit = Data( name=f"fit_q{qubit}", quantities=[ "fit_amplitude", "fit_center", "fit_sigma", "fit_offset", labels[1], labels[0], ], ) frequencies = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) # Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(lorenzian) # Guess parameters for Lorentzian max or min if (nqubits == 1 and labels[0] == "resonator_freq") or ( nqubits != 1 and labels[0] == "qubit_freq" ): guess_center = frequencies[ np.argmax(voltages) ] # Argmax = Returns the indices of the maximum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center) guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi else: guess_center = frequencies[ np.argmin(voltages) ] # Argmin = Returns the indices of the minimum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center) guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi # Add guessed parameters to the model model_Q.set_param_hint("center", value=guess_center, vary=True) model_Q.set_param_hint("sigma", value=guess_sigma, vary=True) model_Q.set_param_hint("amplitude", value=guess_amp, vary=True) model_Q.set_param_hint("offset", value=guess_offset, vary=True) guess_parameters = model_Q.make_params() # fit the model with the data and guessed parameters try: fit_res = model_Q.fit( data=voltages, frequency=frequencies, params=guess_parameters ) except: log.warning("The fitting was not successful") return data_fit # get the values for postprocessing and for legend. 
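    # Editor's descriptive note: with the model defined in qcvv.fitting.utils,
    #     L(f) = (amplitude / pi) * sigma / ((f - center)**2 + sigma**2) + offset,
    # the quantities computed below follow directly from the best-fit values:
    #     f0           = center                          (resonance position)
    #     BW           = 2 * sigma                       (full width at half maximum)
    #     Q            = |f0 / BW|                       (quality factor)
    #     peak_voltage = amplitude / (pi * sigma) + offset   (model value at f = center)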
f0 = fit_res.best_values["center"] BW = fit_res.best_values["sigma"] * 2 Q = abs(f0 / BW) peak_voltage = ( fit_res.best_values["amplitude"] / (fit_res.best_values["sigma"] * np.pi) + fit_res.best_values["offset"] ) freq = f0 * 1e6 data_fit.add( { labels[1]: peak_voltage, labels[0]: freq, "fit_amplitude": fit_res.best_values["amplitude"], "fit_center": fit_res.best_values["center"], "fit_sigma": fit_res.best_values["sigma"], "fit_offset": fit_res.best_values["offset"], } ) return data_fit # params = resonator_freq, peak_voltage # for keys in fit_res.best_values: # fit_res.best_values[keys] = float(fit_res.best_values[keys]) # with open(f"{folder}/data/resonator_spectroscopy/fit.yml", "w+") as file: # yaml.dump( # fit_res.best_values, # file, # sort_keys=False, # indent=4, # default_flow_style=None, # ) # return params, fit_res.best_values src/qcvv/fitting/__init__.py METASEP src/qcvv/cli/builders.py METASEP # -*- coding: utf-8 -*- import datetime import inspect import os import shutil import yaml from qcvv import calibrations from qcvv.config import log, raise_error from qcvv.data import Data def load_yaml(path): """Load yaml file from disk.""" with open(path, "r") as file: data = yaml.safe_load(file) return data class ActionBuilder: """Class for parsing and executing runcards. Args: runcard (path): path containing the runcard. folder (path): path for the output folder. force (bool): option to overwrite the output folder if it exists already. """ def __init__(self, runcard, folder=None, force=False): path, self.folder = self._generate_output_folder(folder, force) self.runcard = load_yaml(runcard) platform_name = self.runcard["platform"] self._allocate_platform(platform_name) self.qubits = self.runcard["qubits"] self.format = self.runcard["format"] # Saving runcard self.save_runcards(path, runcard) self.save_meta(path, self.folder, platform_name) @staticmethod def _generate_output_folder(folder, force): """Static method for generating the output folder. Args: folder (path): path for the output folder. If None it will be created a folder automatically force (bool): option to overwrite the output folder if it exists already. 
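        Example (editor's illustration with hypothetical values): for user
        ``jdoe`` on 2022-08-10 the generated folder is ``2022-08-10-000-jdoe``;
        if that directory already exists the counter is increased, giving
        ``2022-08-10-001-jdoe``, ``2022-08-10-002-jdoe`` and so on.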
""" if folder is None: import getpass e = datetime.datetime.now() user = getpass.getuser().replace(".", "-") date = e.strftime("%Y-%m-%d") folder = f"{date}-{'000'}-{user}" num = 0 while os.path.exists(folder): log.warning(f"Directory {folder} already exists.") num += 1 folder = f"{date}-{str(num).rjust(3, '0')}-{user}" log.warning(f"Trying to create directory {folder}") elif os.path.exists(folder) and not force: raise_error(RuntimeError, f"Directory {folder} already exists.") elif os.path.exists(folder) and force: log.warning(f"Deleting previous directory {folder}.") shutil.rmtree(os.path.join(os.getcwd(), folder)) path = os.path.join(os.getcwd(), folder) log.info(f"Creating directory {folder}.") os.makedirs(path) return path, folder def _allocate_platform(self, platform_name): """Allocate the platform using Qibolab.""" from qibo.backends import construct_backend self.platform = construct_backend("qibolab", platform=platform_name).platform def save_runcards(self, path, runcard): """Save the output runcards.""" from qibolab.paths import qibolab_folder platform_runcard = ( qibolab_folder / "runcards" / f"{self.runcard['platform']}.yml" ) shutil.copy(platform_runcard, f"{path}/platform.yml") shutil.copy(runcard, f"{path}/runcard.yml") def save_meta(self, path, folder, platform_name): import qibo import qibolab import qcvv e = datetime.datetime.now(datetime.timezone.utc) meta = {} meta["title"] = folder meta["platform"] = platform_name meta["date"] = e.strftime("%Y-%m-%d") meta["start-time"] = e.strftime("%H:%M:%S") meta["end-time"] = e.strftime("%H:%M:%S") meta["versions"] = { "qibo": qibo.__version__, "qibolab": qibolab.__version__, "qcvv": qcvv.__version__, } with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) def _build_single_action(self, name): """Helper method to parse the actions in the runcard.""" f = getattr(calibrations, name) path = os.path.join(self.folder, f"data/{name}/") os.makedirs(path) sig = inspect.signature(f) params = self.runcard["actions"][name] for param in list(sig.parameters)[2:-1]: if param not in params: raise_error(AttributeError, f"Missing parameter {param} in runcard.") return f, params, path def execute(self): """Method to execute sequentially all the actions in the runcard.""" self.platform.connect() self.platform.setup() self.platform.start() for action in self.runcard["actions"]: routine, args, path = self._build_single_action(action) self._execute_single_action(routine, args, path) self.platform.stop() self.platform.disconnect() def _execute_single_action(self, routine, arguments, path): """Method to execute a single action and retrieving the results.""" for qubit in self.qubits: results = routine(self.platform, qubit, **arguments) if self.format is None: raise_error( ValueError, f"Cannot store data using {self.format} format." 
) for data in results: getattr(data, f"to_{self.format}")(path) self.update_platform_runcard(qubit, routine.__name__) def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data( self.folder, routine, self.format, f"fit_q{qubit}" ) except: data_fit = Data() params = [i for i in list(data_fit.df.keys()) if "fit" not in i] settings = load_yaml(f"{self.folder}/platform.yml") for param in params: settings["characterization"]["single_qubit"][qubit][param] = int( data_fit.df[param][0] ) with open(f"{self.folder}/data/{routine}/platform.yml", "a+") as file: yaml.dump( settings, file, sort_keys=False, indent=4, default_flow_style=None ) def dump_report(self): from qcvv.web.report import create_report # update end time meta = load_yaml(f"{self.folder}/meta.yml") e = datetime.datetime.now(datetime.timezone.utc) meta["end-time"] = e.strftime("%H:%M:%S") with open(f"{self.folder}/meta.yml", "w") as file: yaml.dump(meta, file) create_report(self.folder) class ReportBuilder: """Parses routines and plots to report and live plotting page. Args: path (str): Path to the data folder to generate report for. """ def __init__(self, path): self.path = path self.metadata = load_yaml(os.path.join(path, "meta.yml")) # find proper path title base, self.title = os.path.join(os.getcwd(), path), "" while self.title in ("", "."): base, self.title = os.path.split(base) self.runcard = load_yaml(os.path.join(path, "runcard.yml")) self.format = self.runcard.get("format") self.qubits = self.runcard.get("qubits") # create calibration routine objects # (could be incorporated to :meth:`qcvv.cli.builders.ActionBuilder._build_single_action`) self.routines = [] for action in self.runcard.get("actions"): routine = getattr(calibrations, action) if not hasattr(routine, "plots"): routine.plots = [] self.routines.append(routine) def get_routine_name(self, routine): """Prettify routine's name for report headers.""" return routine.__name__.replace("_", " ").title() def get_figure(self, routine, method, qubit): """Get html figure for report. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ import tempfile figure = method(self.path, routine.__name__, qubit, self.format) with tempfile.NamedTemporaryFile() as temp: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") return fightml def get_live_figure(self, routine, method, qubit): """Get url to dash page for live plotting. This url is used by :meth:`qcvv.web.app.get_graph`. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ return os.path.join( method.__name__, self.path, routine.__name__, str(qubit), self.format, ) src/qcvv/cli/_base.py METASEP # -*- coding: utf-8 -*- """Adds global CLI options.""" import base64 import pathlib import shutil import socket import subprocess import uuid from urllib.parse import urljoin import click from qibo.config import log, raise_error from qcvv.cli.builders import ActionBuilder CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # options for report upload UPLOAD_HOST = ( "qcvv@localhost" if socket.gethostname() == "saadiyat" else "[email protected]" ) TARGET_DIR = "qcvv-reports/" ROOT_URL = "http://login.qrccluster.com:9000/" @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("runcard", metavar="RUNCARD", type=click.Path(exists=True)) @click.option( "folder", "-o", type=click.Path(), help="Output folder. 
If not provided a standard name will generated.", ) @click.option( "force", "-f", is_flag=True, help="Use --force option to overwrite the output folder.", ) def command(runcard, folder, force=None): """qcvv: Quantum Calibration Verification and Validation using Qibo. Arguments: - RUNCARD: runcard with declarative inputs. """ action_builder = ActionBuilder(runcard, folder, force) action_builder.execute() action_builder.dump_report() @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "port", "-p", "--port", default=8050, type=int, help="Localhost port to launch dash server.", ) @click.option( "debug", "-d", "--debug", is_flag=True, help="Launch server in debugging mode.", ) def live_plot(port, debug): """Real time plotting of calibration data on a dash server.""" import socket from qcvv.web.app import app # change port if it is already used while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: if s.connect_ex(("localhost", port)) != 0: break port += 1 app.run_server(debug=debug, port=port) @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("output_folder", metavar="FOLDER", type=click.Path(exists=True)) def upload(output_folder): """Uploads output folder to server""" output_path = pathlib.Path(output_folder) # check the rsync command exists. if not shutil.which("rsync"): raise_error( RuntimeError, "Could not find the rsync command. Please make sure it is installed.", ) # check that we can authentica with a certificate ssh_command_line = ( "ssh", "-o", "PreferredAuthentications=publickey", "-q", UPLOAD_HOST, "exit", ) str_line = " ".join(repr(ele) for ele in ssh_command_line) log.info(f"Checking SSH connection to {UPLOAD_HOST}.") try: subprocess.run(ssh_command_line, check=True) except subprocess.CalledProcessError as e: raise RuntimeError( ( "Could not validate the SSH key. " "The command\n%s\nreturned a non zero exit status. " "Please make sure that your public SSH key is on the server." ) % str_line ) from e except OSError as e: raise RuntimeError( "Could not run the command\n{}\n: {}".format(str_line, e) ) from e log.info("Connection seems OK.") # upload output randname = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode() newdir = TARGET_DIR + randname rsync_command = ( "rsync", "-aLz", "--chmod=ug=rwx,o=rx", f"{output_path}/", f"{UPLOAD_HOST}:{newdir}", ) log.info(f"Uploading output ({output_path}) to {UPLOAD_HOST}") try: subprocess.run(rsync_command, check=True) except subprocess.CalledProcessError as e: msg = f"Failed to upload output: {e}" raise RuntimeError(msg) from e url = urljoin(ROOT_URL, randname) log.info(f"Upload completed. The result is available at:\n{url}") src/qcvv/cli/__init__.py METASEP # -*- coding: utf-8 -*- """CLI entry point.""" from ._base import command, live_plot, upload src/qcvv/decorators.py METASEP # -*- coding: utf-8 -*- """Decorators implementation.""" import os from qcvv.config import raise_error def plot(header, method): """Decorator for adding plots in the report and live plotting page. Args: header (str): Header of the plot to use in the report. method (Callable): Plotting method defined under ``qcvv.plots``. 
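    Example (abridged from ``qcvv.calibrations.resonator_spectroscopy``)::

        @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision)
        def resonator_spectroscopy(platform, qubit, ...):
            ...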
""" def wrapped(f): if hasattr(f, "plots"): # insert in the beginning of the list to have # proper plot ordering in the report f.plots.insert(0, (header, method)) else: f.plots = [(header, method)] return f return wrapped src/qcvv/data.py METASEP # -*- coding: utf-8 -*- """Implementation of Dataset class to store measurements.""" from abc import abstractmethod import pandas as pd import pint_pandas from qcvv.config import raise_error class AbstractDataset: def __init__(self, name=None): if name is None: self.name = "data" else: self.name = name self.df = pd.DataFrame() def __add__(self, data): self.df = pd.concat([self.df, data.df], ignore_index=True) return self @abstractmethod def add(self, data): raise_error(NotImplementedError) def __len__(self): """Computes the length of the dataset.""" return len(self.df) @abstractmethod def load_data(cls, folder, routine, format, name): raise_error(NotImplementedError) @abstractmethod def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" if self.quantities == None: self.df.to_csv(f"{path}/{self.name}.csv") else: self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") class Dataset(AbstractDataset): """Class to store the data measured during the calibration routines. It is a wrapper to a pandas DataFrame with units of measure from the Pint library. Args: quantities (dict): dictionary containing additional quantities that the user may save other than the pulse sequence output. The keys are the name of the quantities and the corresponding values are the units of measure. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) self.df = pd.DataFrame( { "MSR": pd.Series(dtype="pint[V]"), "i": pd.Series(dtype="pint[V]"), "q": pd.Series(dtype="pint[V]"), "phase": pd.Series(dtype="pint[deg]"), } ) self.quantities = {"MSR": "V", "i": "V", "q": "V", "phase": "deg"} if quantities is not None: self.quantities.update(quantities) for name, unit in quantities.items(): self.df.insert(0, name, pd.Series(dtype=f"pint[{unit}]")) def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ import re from pint import UnitRegistry ureg = UnitRegistry() l = len(self) for key, value in data.items(): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) # TODO: find a better way to do this self.df.loc[l, name] = value * ureg(unit) def get_values(self, quantity, unit): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. unit (str): Unit of the returned values. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity].pint.to(unit).pint.magnitude @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. 
""" obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file, header=[0, 1]) obj.df = obj.df.pint.quantify(level=-1) obj.df.pop("Unnamed: 0_level_0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") class Data(AbstractDataset): """Class to store the data obtained from calibration routines. It is a wrapper to a pandas DataFrame. Args: quantities (dict): dictionary quantities to be saved. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) if quantities is not None: self.quantities = quantities for name in quantities: self.df.insert(0, name, pd.Series(dtype=object)) def add(self, data): """Add a row to dataset. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): self.df.loc[l, key] = value def get_values(self, quantity): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity] @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: dataset (``Dataset``): dataset object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file) obj.df.pop("Unnamed: 0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") src/qcvv/config.py METASEP # -*- coding: utf-8 -*- """Custom logger implemenation.""" import logging import os # Logging level from 0 (all) to 4 (errors) (see https://docs.python.org/3/library/logging.html#logging-levels) QCVV_LOG_LEVEL = 1 if "QCVV_LOG_LEVEL" in os.environ: # pragma: no cover QCVV_LOG_LEVEL = 10 * int(os.environ.get("QCVV_LOG_LEVEL")) def raise_error(exception, message=None, args=None): """Raise exception with logging error. Args: exception (Exception): python exception. message (str): the error message. 
""" log.error(message) if args: raise exception(message, args) else: raise exception(message) # Configuration for logging mechanism class CustomHandler(logging.StreamHandler): """Custom handler for logging algorithm.""" def format(self, record): """Format the record with specific format.""" from qcvv import __version__ fmt = f"[Qcvv {__version__}|%(levelname)s|%(asctime)s]: %(message)s" return logging.Formatter(fmt, datefmt="%Y-%m-%d %H:%M:%S").format(record) # allocate logger object log = logging.getLogger(__name__) log.setLevel(QCVV_LOG_LEVEL) log.addHandler(CustomHandler()) src/qcvv/__init__.py METASEP # -*- coding: utf-8 -*- from .cli import command, live_plot, upload """qcvv: Quantum Calibration Verification and Validation using Qibo.""" import importlib.metadata as im __version__ = im.version(__package__) src/qcvv/calibrations/utils.py METASEP # -*- coding: utf-8 -*- import numpy as np def variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ): """Helper function for sweeps.""" return np.concatenate( ( np.arange(-lowres_width, -highres_width, lowres_step), np.arange(-highres_width, highres_width, highres_step), np.arange(highres_width, lowres_width, lowres_step), ) ) src/qcvv/calibrations/resonator_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.calibrations.utils import variable_resolution_scanrange from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def resonator_spectroscopy( platform: AbstractPlatform, qubit, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ) + resonator_frequency ) fast_sweep_data = Dataset( name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield fast_sweep_data yield lorentzian_fit( fast_sweep_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } fast_sweep_data.add(results) count += 1 yield fast_sweep_data # FIXME: have live ploting work for multiple datasets saved if platform.resonator_type == "3D": resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) else: resonator_frequency = fast_sweep_data.df.frequency[ fast_sweep_data.df.MSR.index[fast_sweep_data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(fast_sweep_data.df.MSR.values[: (lowres_width // lowres_step)]) * 1e6 ) precision_sweep__data = Dataset( name=f"precision_sweep_q{qubit}", 
quantities={"frequency": "Hz"} ) freqrange = ( np.arange(-precision_width, precision_width, precision_step) + resonator_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield precision_sweep__data yield lorentzian_fit( fast_sweep_data + precision_sweep__data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } precision_sweep__data.add(results) count += 1 yield precision_sweep__data @plot("Frequency vs Attenuation", plots.frequency_attenuation_msr_phase) @plot("MSR vs Frequency", plots.frequency_attenuation_msr_phase__cut) def resonator_punchout( platform: AbstractPlatform, qubit, freq_width, freq_step, min_att, max_att, step_att, software_averages, points=10, ): platform.reload_settings() data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "attenuation": "dB"} ) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence = PulseSequence() sequence.add(ro_pulse) # TODO: move this explicit instruction to the platform resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency - (freq_width / 4) ) attenuation_range = np.flip(np.arange(min_att, max_att, step_att)) count = 0 for _ in range(software_averages): for att in attenuation_range: for freq in frequency_range: if count % points == 0: yield data # TODO: move these explicit instructions to the platform platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.ro_port[qubit].attenuation = att msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr * (np.exp(att / 10)), "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "attenuation[dB]": att, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Flux Current", plots.frequency_flux_msr_phase) def resonator_spectroscopy_flux( platform: AbstractPlatform, qubit, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline=0, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement 
normalization data.add(results) count += 1 yield data # TODO: automatically extract the sweet spot current # TODO: add a method to generate the matrix @plot("MSR row 1 and Phase row 2", plots.frequency_flux_msr_phase__matrix) def resonator_spectroscopy_flux_matrix( platform: AbstractPlatform, qubit, freq_width, freq_step, current_min, current_max, current_step, fluxlines, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = np.arange(current_min, current_max, current_step) count = 0 for fluxline in fluxlines: fluxline = int(fluxline) print(fluxline) data = Dataset( name=f"data_q{qubit}_f{fluxline}", quantities={"frequency": "Hz", "current": "A"}, ) for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qcvv/calibrations/qubit_spectroscopy.py METASEP # -*- coding: utf-8 -*- import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qcvv import plots from qcvv.data import Dataset from qcvv.decorators import plot from qcvv.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def qubit_spectroscopy( platform: AbstractPlatform, qubit, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] freqrange = np.arange(fast_start, fast_end, fast_step) + qubit_frequency data = Dataset(quantities={"frequency": "Hz", "attenuation": "dB"}) # FIXME: Waiting for Qblox platform to take care of that platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) data = Dataset(name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield data yield lorentzian_fit( data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data.add(results) count += 1 yield data if platform.resonator_type == "3D": qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmin()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) else: 
qubit_frequency = data.df.frequency[ data.df.MSR.index[data.df.MSR.argmax()] ].magnitude avg_voltage = ( np.mean(data.df.MSR.values[: ((fast_end - fast_start) // fast_step)]) * 1e6 ) prec_data = Dataset( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(precision_start, precision_end, precision_step) + qubit_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield prec_data yield lorentzian_fit( data + prec_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } prec_data.add(results) count += 1 yield prec_data # TODO: Estimate avg_voltage correctly @plot("MSR and Phase vs Frequency", plots.frequency_flux_msr_phase) def qubit_spectroscopy_flux( platform: AbstractPlatform, qubit, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) data = Dataset( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = np.arange(-freq_width, freq_width, freq_step) + qubit_frequency current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qcvv/calibrations/__init__.py METASEP # -*- coding: utf-8 -*- from qcvv.calibrations.qubit_spectroscopy import * from qcvv.calibrations.resonator_spectroscopy import * src/qcvv/calibrations/rabi_oscillations.py METASEP
[ { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n 
labels=[\n \"pi_pulse_gain\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"gain[dimensionless]\": gain,\n }", "type": "inproject" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n 
# -*- coding: utf-8 -*-
import numpy as np
from qibolab.platforms.abstract import AbstractPlatform
from qibolab.pulses import PulseSequence

from qcvv import plots
from qcvv.data import Dataset
from qcvv.decorators import plot
from qcvv.fitting.methods import rabi_fit


@plot("MSR vs Time", plots.time_msr_phase)
def rabi_pulse_length(
    platform: AbstractPlatform,
    qubit,
    pulse_duration_start,
    pulse_duration_end,
    pulse_duration_step,
    software_averages,
    points=10,
):
    platform.reload_settings()

    data = Dataset(name=f"data_q{qubit}", quantities={"Time": "ns"})

    sequence = PulseSequence()
    qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)
    ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)
    sequence.add(qd_pulse)
    sequence.add(ro_pulse)

    qd_pulse_duration_range = np.arange(
        pulse_duration_start, pulse_duration_end, pulse_duration_step
    )

    # FIXME: Waiting to be able to pass qpucard to qibolab
    platform.ro_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["resonator_freq"]
        - ro_pulse.frequency
    )
    platform.qd_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["qubit_freq"]
        - qd_pulse.frequency
    )

    count = 0
    for _ in range(software_averages):
        for duration in qd_pulse_duration_range:
            qd_pulse.duration = duration
            ro_pulse.start = duration
            if count % points == 0 and count > 0:
                yield data
                yield rabi_fit(
                    data,
                    x="Time[ns]",
                    y="MSR[uV]",
                    qubit=qubit,
                    nqubits=platform.settings["nqubits"],
                    labels=[
                        "pi_pulse_duration",
                        "rabi_oscillations_pi_pulse_max_voltage",
                        "t1",
                    ],
                )
            msr, phase, i, q = platform.execute_pulse_sequence(sequence)[
                ro_pulse.serial
            ]
            results = {
                "MSR[V]": msr,
                "i[V]": i,
                "q[V]": q,
                "phase[rad]": phase,
                "Time[ns]": duration,
            }
            data.add(results)
            count += 1
    yield data


@plot("MSR vs Gain", plots.gain_msr_phase)
def rabi_pulse_gain(
    platform: AbstractPlatform,
    qubit,
    pulse_gain_start,
    pulse_gain_end,
    pulse_gain_step,
    software_averages,
    points=10,
):
    platform.reload_settings()

    data = Dataset(name=f"data_q{qubit}", quantities={"gain": "dimensionless"})

    sequence = PulseSequence()
    qd_pulse = platform.create_RX_pulse(qubit, start=0)
    ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)
    sequence.add(qd_pulse)
    sequence.add(ro_pulse)

    qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)

    # FIXME: Waiting to be able to pass qpucard to qibolab
    platform.ro_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["resonator_freq"]
        - ro_pulse.frequency
    )
    platform.qd_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["qubit_freq"]
        - qd_pulse.frequency
    )

    count = 0
    for _ in range(software_averages):
        for gain in qd_pulse_gain_range:
            platform.qd_port[qubit].gain = gain
            if count % points == 0 and count > 0:
                yield data
                yield rabi_fit(
                    data,
                    x="gain[dimensionless]",
                    y="MSR[uV]",
                    qubit=qubit,
                    nqubits=platform.settings["nqubits"],
                    labels=[
                        "pi_pulse_gain",
                        "rabi_oscillations_pi_pulse_max_voltage",
                        "t1",
                    ],
                )
            msr, phase, i, q = platform.execute_pulse_sequence(sequence)[
                ro_pulse.serial
            ]
            results = {
                "MSR[V]": msr,
                "i[V]": i,
                "q[V]": q,
                "phase[rad]": phase,
                "gain[dimensionless]": gain,
            }
            data.add(results)
            count += 1
    yield data


@plot("MSR vs Amplitude", plots.amplitude_msr_phase)
def rabi_pulse_amplitude(
    platform,
    qubit,
    pulse_amplitude_start,
    pulse_amplitude_end,
    pulse_amplitude_step,
    software_averages,
    points=10,
):
    platform.reload_settings()

    data = Dataset(name=f"data_q{qubit}", quantities={"amplitude": "dimensionless"})

    sequence = PulseSequence()
    qd_pulse = platform.create_RX_pulse(qubit, start=0)
    ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)
    sequence.add(qd_pulse)
    sequence.add(ro_pulse)

    qd_pulse_amplitude_range = np.arange(
        pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step
    )

    # FIXME: Waiting to be able to pass qpucard to qibolab
    platform.ro_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["resonator_freq"]
        - ro_pulse.frequency
    )
    platform.qd_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["qubit_freq"]
        - qd_pulse.frequency
    )

    count = 0
    for _ in range(software_averages):
        for amplitude in qd_pulse_amplitude_range:
            qd_pulse.amplitude = amplitude
            if count % points == 0 and count > 0:
                yield data
                yield rabi_fit(
                    data,
                    x="amplitude[dimensionless]",
                    y="MSR[uV]",
                    qubit=qubit,
                    nqubits=platform.settings["nqubits"],
                    labels=[
                        "pi_pulse_amplitude",
                        "rabi_oscillations_pi_pulse_max_voltage",
                        "t1",
                    ],
                )
            msr, phase, i, q = platform.execute_pulse_sequence(sequence)[
                ro_pulse.serial
            ]
            results = {
                "MSR[V]": msr,
                "i[V]": i,
                "q[V]": q,
                "phase[rad]": phase,
                "amplitude[dimensionless]": amplitude,
            }
            data.add(results)
            count += 1
    yield data


@plot("MSR vs length and gain", plots.duration_gain_msr_phase)
def rabi_pulse_length_and_gain(
    platform: AbstractPlatform,
    qubit,
    pulse_duration_start,
    pulse_duration_end,
    pulse_duration_step,
    pulse_gain_start,
    pulse_gain_end,
    pulse_gain_step,
    software_averages,
    points=10,
):
    platform.reload_settings()

    data = Dataset(
        name=f"data_q{qubit}", quantities={"duration": "ns", "gain": "dimensionless"}
    )

    sequence = PulseSequence()
    qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)
    ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)
    sequence.add(qd_pulse)
    sequence.add(ro_pulse)

    qd_pulse_duration_range = np.arange(
        pulse_duration_start, pulse_duration_end, pulse_duration_step
    )
    qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)

    # FIXME: Waiting to be able to pass qpucard to qibolab
    platform.ro_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["resonator_freq"]
        - ro_pulse.frequency
    )
    platform.qd_port[qubit].lo_frequency = (
        platform.characterization["single_qubit"][qubit]["qubit_freq"]
        - qd_pulse.frequency
    )

    count = 0
    for _ in range(software_averages):
        for duration in qd_pulse_duration_range:
            qd_pulse.duration = duration
            ro_pulse.start = duration
            for gain in qd_pulse_gain_range:
                platform.qd_port[qubit].gain = gain
                if count % points == 0 and count > 0:
                    yield data
                msr, phase, i, q = platform.execute_pulse_sequence(sequence)[
                    ro_pulse.serial
                ]
                results = {
                    "MSR[V]": msr,
                    "i[V]": i,
                    "q[V]": q,
                    "phase[rad]": phase,
                    "duration[ns]": duration,
                    "gain[dimensionless]": gain,
                }
                data.add(results)
                count += 1

    yield data
platform.reload_settings()\n\n data = Dataset(\n name=f\"data_q{qubit}\", quantities={\"duration\": \"ns\", \"gain\": \"dimensionless\"}\n )\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"duration[ns]\": duration,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n\n yield data\n\n\n@plot(\"MSR vs length and amplitude\", plots.duration_amplitude_msr_phase)\ndef rabi_pulse_length_and_amplitude(\n platform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"duration\": \"ns\", \"amplitude\": \"dimensionless\"},\n )\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n qd_pulse_amplitude_range = np.arange(\n pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step\n )", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n 
platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_gain\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Amplitude\", plots.amplitude_msr_phase)\ndef rabi_pulse_amplitude(\n platform,\n qubit,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"amplitude\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_amplitude_range = np.arange(\n pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 
0\n for _ in range(software_averages):\n for amplitude in qd_pulse_amplitude_range:\n qd_pulse.amplitude = amplitude\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"amplitude[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_amplitude\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"amplitude[dimensionless]\": amplitude,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs length and gain\", plots.duration_gain_msr_phase)\ndef rabi_pulse_length_and_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(\n name=f\"data_q{qubit}\", quantities={\"duration\": \"ns\", \"gain\": \"dimensionless\"}\n )\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"duration[ns]\": duration,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n\n yield data\n\n\n@plot(\"MSR vs length and amplitude\", plots.duration_amplitude_msr_phase)", "type": "non_informative" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to 
qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_gain\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Amplitude\", plots.amplitude_msr_phase)\ndef rabi_pulse_amplitude(\n platform,\n qubit,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"amplitude\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_amplitude_range = np.arange(\n pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n 
platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for amplitude in qd_pulse_amplitude_range:\n qd_pulse.amplitude = amplitude\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"amplitude[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_amplitude\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"amplitude[dimensionless]\": amplitude,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs length and gain\", plots.duration_gain_msr_phase)\ndef rabi_pulse_length_and_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, 
pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_gain\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Amplitude\", plots.amplitude_msr_phase)\ndef rabi_pulse_amplitude(\n platform,\n qubit,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"amplitude\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_amplitude_range = np.arange(\n pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n 
x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_gain\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Amplitude\", plots.amplitude_msr_phase)\ndef rabi_pulse_amplitude(\n platform,\n qubit,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"amplitude\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_amplitude_range = np.arange(\n pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for amplitude in qd_pulse_amplitude_range:\n qd_pulse.amplitude = amplitude\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"amplitude[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_amplitude\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = 
platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"amplitude[dimensionless]\": amplitude,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs length and gain\", plots.duration_gain_msr_phase)\ndef rabi_pulse_length_and_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(\n name=f\"data_q{qubit}\", quantities={\"duration\": \"ns\", \"gain\": \"dimensionless\"}\n )\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"duration[ns]\": duration,\n \"gain[dimensionless]\": gain,\n }\n data.add(results)\n count += 1\n\n yield data\n\n\n@plot(\"MSR vs length and amplitude\", plots.duration_amplitude_msr_phase)\ndef rabi_pulse_length_and_amplitude(\n platform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n pulse_amplitude_start,\n pulse_amplitude_end,\n pulse_amplitude_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(\n name=f\"data_q{qubit}\",\n quantities={\"duration\": \"ns\", \"amplitude\": \"dimensionless\"},\n )\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(", "type": "random" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom qibolab.platforms.abstract import AbstractPlatform\nfrom qibolab.pulses import PulseSequence\n\nfrom qcvv import plots\nfrom qcvv.data import Dataset\nfrom qcvv.decorators import plot\nfrom qcvv.fitting.methods import rabi_fit\n\n\n@plot(\"MSR vs Time\", plots.time_msr_phase)\ndef rabi_pulse_length(\n platform: AbstractPlatform,\n qubit,\n pulse_duration_start,\n pulse_duration_end,\n pulse_duration_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, 
start=4)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_duration_range = np.arange(\n pulse_duration_start, pulse_duration_end, pulse_duration_step\n )\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for duration in qd_pulse_duration_range:\n qd_pulse.duration = duration\n ro_pulse.start = duration\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"Time[ns]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_duration\",\n \"rabi_oscillations_pi_pulse_max_voltage\",\n \"t1\",\n ],\n )\n msr, phase, i, q = platform.execute_pulse_sequence(sequence)[\n ro_pulse.serial\n ]\n results = {\n \"MSR[V]\": msr,\n \"i[V]\": i,\n \"q[V]\": q,\n \"phase[rad]\": phase,\n \"Time[ns]\": duration,\n }\n data.add(results)\n count += 1\n yield data\n\n\n@plot(\"MSR vs Gain\", plots.gain_msr_phase)\ndef rabi_pulse_gain(\n platform: AbstractPlatform,\n qubit,\n pulse_gain_start,\n pulse_gain_end,\n pulse_gain_step,\n software_averages,\n points=10,\n):\n platform.reload_settings()\n\n data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})\n\n sequence = PulseSequence()\n qd_pulse = platform.create_RX_pulse(qubit, start=0)\n ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)\n sequence.add(qd_pulse)\n sequence.add(ro_pulse)\n\n qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step)\n\n # FIXME: Waiting to be able to pass qpucard to qibolab\n platform.ro_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"resonator_freq\"]\n - ro_pulse.frequency\n )\n platform.qd_port[qubit].lo_frequency = (\n platform.characterization[\"single_qubit\"][qubit][\"qubit_freq\"]\n - qd_pulse.frequency\n )\n\n count = 0\n for _ in range(software_averages):\n for gain in qd_pulse_gain_range:\n platform.qd_port[qubit].gain = gain\n if count % points == 0 and count > 0:\n yield data\n yield rabi_fit(\n data,\n x=\"gain[dimensionless]\",\n y=\"MSR[uV]\",\n qubit=qubit,\n nqubits=platform.settings[\"nqubits\"],\n labels=[\n \"pi_pulse_gain\",", "type": "random" } ]
[ "@plot(\"MSR vs Time\", plots.time_msr_phase)", " data.add(results)", "@plot(\"MSR vs Amplitude\", plots.amplitude_msr_phase)", "@plot(\"MSR vs length and amplitude\", plots.duration_amplitude_msr_phase)", " data = Dataset(name=f\"data_q{qubit}\", quantities={\"Time\": \"ns\"})", " sequence.add(qd_pulse)", " data = Dataset(name=f\"data_q{qubit}\", quantities={\"amplitude\": \"dimensionless\"})", " sequence.add(ro_pulse)", " data = Dataset(", "@plot(\"MSR vs Gain\", plots.gain_msr_phase)", "@plot(\"MSR vs length and gain\", plots.duration_gain_msr_phase)", " data = Dataset(name=f\"data_q{qubit}\", quantities={\"gain\": \"dimensionless\"})", " ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration)", " qd_pulse = platform.create_RX_pulse(qubit, start=0)", " ro_pulse.start = duration", " qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4)", " ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4)", " ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish)", " )", "", "from qibolab.platforms.abstract import AbstractPlatform", "def rabi_pulse_length_and_amplitude(", " pulse_duration_step,", " - qd_pulse.frequency", " pulse_duration_start, pulse_duration_end, pulse_duration_step", " \"rabi_oscillations_pi_pulse_max_voltage\"," ]
METASEP
16
qiboteam__qibocal
qiboteam__qibocal METASEP doc/source/conf.py METASEP # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.transform import AutoStructify sys.path.insert(0, os.path.abspath("..")) import qibocal # -- Project information ----------------------------------------------------- project = "qibocal" copyright = "2022, The Qibo team" author = "The Qibo team" # The full version, including alpha/beta/rc tags release = qibocal.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.intersphinx", "recommonmark", "sphinx_markdown_tables", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # Markdown configuration # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"} autosectionlabel_prefix_document = True # Allow to embed rst syntax in markdown files. enable_eval_rst = True # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] # -- Intersphinx ------------------------------------------------------------- intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} # -- Autodoc ------------------------------------------------------------------ # autodoc_member_order = "bysource" # Adapted this from # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/docs/conf.py # app setup hook def setup(app): app.add_config_value("recommonmark_config", {"enable_eval_rst": True}, True) app.add_transform(AutoStructify) app.add_css_file("css/style.css") serverscripts/qibocal-update-on-change.py METASEP #!/usr/bin/env python import argparse import curio import inotify.adapters import inotify.constants from curio import subprocess async def main(folder, exe_args): i = inotify.adapters.Inotify() i.add_watch(folder) for event in i.event_gen(yield_nones=False): if event is not None: (header, _, _, _) = event if ( (header.mask & inotify.constants.IN_CREATE) or (header.mask & inotify.constants.IN_DELETE) or (header.mask & inotify.constants.IN_MODIFY) ): await subprocess.run(exe_args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("folder") parser.add_argument("exe_args", nargs="+") args = parser.parse_args() curio.run(main(args.folder, args.exe_args)) serverscripts/qibocal-index-reports.py METASEP """qibocal-index-reports.py Generates a JSON index with reports information. """ import json import pathlib import sys from collections import ChainMap import yaml ROOT = "/home/users/qibocal/qibocal-reports" ROOT_URL = "http://login.qrccluster.com:9000/" OUT = "/home/users/qibocal/qibocal-reports/index.json" DEFAULTS = { "title": "-", "date": "-", "platform": "-", "start-time": "-", "end-time": "-", } REQUIRED_FILE_METADATA = {"title", "date", "platform", "start-time" "end-time"} def meta_from_path(p): meta = ChainMap(DEFAULTS) yaml_meta = p / "meta.yml" yaml_res = {} if yaml_meta.exists(): with yaml_meta.open() as f: try: yaml_res = yaml.safe_load(f) except yaml.YAMLError as e: print(f"Error processing {yaml_meta}: {e}", file=sys.stderr) meta = meta.new_child(yaml_res) return meta def register(p): path_meta = meta_from_path(p) title, date, platform, start_time, end_time = ( path_meta["title"], path_meta["date"], path_meta["platform"], path_meta["start-time"], path_meta["end-time"], ) url = ROOT_URL + p.name titlelink = f'<a href="{url}">{title}</a>' return (titlelink, date, platform, start_time, end_time) def make_index(): root_path = pathlib.Path(ROOT) data = [] for p in root_path.iterdir(): if p.is_dir(): try: res = register(p) data.append(res) except: print("Error processing folder", p, file=sys.stderr) raise with open(OUT, "w") as f: json.dump({"data": data}, f) if __name__ == "__main__": make_index() src/qibocal/web/server.py METASEP import os import pathlib import yaml from flask import Flask, render_template from qibocal import __version__ from qibocal.cli.builders import ReportBuilder server = Flask(__name__) @server.route("/") @server.route("/data/<path>") def page(path=None): folders = [ folder for folder in reversed(sorted(os.listdir(os.getcwd()))) if os.path.isdir(folder) and "meta.yml" in os.listdir(folder) ] report = None if path is not None: try: report = ReportBuilder(path) except (FileNotFoundError, TypeError): pass return render_template( "template.html", version=__version__, folders=folders, report=report, ) src/qibocal/web/report.py METASEP import os import pathlib from jinja2 import Environment, 
FileSystemLoader from qibocal import __version__ from qibocal.cli.builders import ReportBuilder def create_report(path): """Creates an HTML report for the data in the given path.""" filepath = pathlib.Path(__file__) with open(os.path.join(filepath.with_name("static"), "styles.css")) as file: css_styles = f"<style>\n{file.read()}\n</style>" report = ReportBuilder(path) env = Environment(loader=FileSystemLoader(filepath.with_name("templates"))) template = env.get_template("template.html") html = template.render( is_static=True, css_styles=css_styles, version=__version__, report=report, ) with open(os.path.join(path, "index.html"), "w") as file: file.write(html) src/qibocal/web/app.py METASEP import os import pandas as pd import yaml from dash import Dash, Input, Output, dcc, html from qibocal import plots from qibocal.data import DataUnits from qibocal.web.server import server DataUnits() # dummy dataset call to suppress ``pint[V]`` error app = Dash( server=server, suppress_callback_exceptions=True, ) app.layout = html.Div( [ dcc.Location(id="url", refresh=False), dcc.Graph(id="graph", figure={}), dcc.Interval( id="interval", # TODO: Perhaps the user should be allowed to change the refresh rate interval=1000, n_intervals=0, disabled=False, ), ] ) @app.callback( Output("graph", "figure"), Input("interval", "n_intervals"), Input("graph", "figure"), Input("url", "pathname"), ) def get_graph(n, current_figure, url): method, folder, routine, qubit, format = url.split(os.sep)[2:] try: # data = DataUnits.load_data(folder, routine, format, "precision_sweep") # with open(f"{folder}/platform.yml", "r") as f: # nqubits = yaml.safe_load(f)["nqubits"] # if len(data) > 2: # params, fit = resonator_spectroscopy_fit(folder, format, nqubits) # else: # params, fit = None, None # return getattr(plots.resonator_spectroscopy, method)(data, params, fit) # # FIXME: Temporarily hardcode the plotting method to test # # multiple routines with different names in one folder # # should be changed to: # # return getattr(getattr(plots, routine), method)(data) return getattr(plots, method)(folder, routine, qubit, format) except (FileNotFoundError, pd.errors.EmptyDataError): return current_figure src/qibocal/web/__init__.py METASEP src/qibocal/tests/test_data.py METASEP """Some tests for the Data and DataUnits class""" import numpy as np import pytest from pint import DimensionalityError, UndefinedUnitError from qibocal.data import Data, DataUnits def random_data_units(length, options=None): data = DataUnits(options=options) for l in range(length): msr, i, q, phase = np.random.rand(4) pulse_sequence_result = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, } add_options = {} if options is not None: for option in options: add_options[option] = str(l) data.add({**pulse_sequence_result, **add_options}) return data def random_data(length): data = Data() for i in range(length): data.add({"int": int(i), "float": float(i), "string": str(i), "bool": bool(i)}) return data def test_data_initialization(): """Test DataUnits constructor""" data = DataUnits() assert len(data.df.columns) == 4 assert list(data.df.columns) == [ # pylint: disable=E1101 "MSR", "i", "q", "phase", ] data1 = DataUnits(quantities={"attenuation": "dB"}) assert len(data1.df.columns) == 5 assert list(data1.df.columns) == [ # pylint: disable=E1101 "attenuation", "MSR", "i", "q", "phase", ] data2 = DataUnits(quantities={"attenuation": "dB"}, options=["option1"]) assert len(data2.df.columns) == 6 assert list(data2.df.columns) == [ # pylint: disable=E1101 
"option1", "attenuation", "MSR", "i", "q", "phase", ] def test_data_units_units(): """Test units of measure in DataUnits""" data_units = DataUnits() assert data_units.df.MSR.values.units == "volt" data_units1 = DataUnits(quantities={"frequency": "Hz"}) assert data_units1.df.frequency.values.units == "hertz" with pytest.raises(UndefinedUnitError): data_units2 = DataUnits(quantities={"fake_unit": "fake"}) def test_data_units_add(): """Test add method of DataUnits""" data_units = random_data_units(5) assert len(data_units) == 5 data_units1 = DataUnits(quantities={"attenuation": "dB"}) msr, i, q, phase, att = np.random.rand(len(data_units1.df.columns)) data_units1.add( { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "attenuation[dB]": att, } ) assert len(data_units1) == 1 data_units1.add( { "MSR[V]": 0, "i[V]": 0.0, "q[V]": 0.0, "phase[deg]": 0, "attenuation[dB]": 1, } ) assert len(data_units1) == 2 data_units2 = DataUnits() msr, i, q, phase = np.random.rand(len(data_units2.df.columns)) with pytest.raises(DimensionalityError): data_units2.add({"MSR[dB]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) with pytest.raises(UndefinedUnitError): data_units2.add({"MSR[test]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase}) data_units3 = random_data_units(10, options=["test"]) assert len(data_units3) == 10 def test_data_add(): """Test add method of Data class""" data = random_data(5) assert len(data) == 5 data.add({"int": 123, "float": 123.456, "string": "123", "bool": True}) assert len(data) == 6 def test_data_units_load_data_from_dict(): """Test set method of DataUnits class""" data_units = DataUnits() test = { "MSR[V]": [1, 2, 3], "i[V]": [3.0, 4.0, 5.0], "q[V]": np.array([3, 4, 5]), "phase[deg]": [6.0, 7.0, 8.0], } data_units.load_data_from_dict(test) assert len(data_units) == 3 assert (data_units.get_values("MSR", "V") == [1, 2, 3]).all() assert (data_units.get_values("i", "V") == [3.0, 4.0, 5.0]).all() assert (data_units.get_values("q", "V") == [3, 4, 5]).all() assert (data_units.get_values("phase", "deg") == [6.0, 7.0, 8.0]).all() data_units1 = DataUnits(options=["option1", "option2"]) test = {"option1": ["one", "two", "three"], "option2": [1, 2, 3]} data_units1.load_data_from_dict(test) assert len(data_units1) == 3 assert (data_units1.get_values("option1") == ["one", "two", "three"]).all() assert (data_units1.get_values("option2") == [1, 2, 3]).all() def test_data_load_data_from_dict(): """Test set method of Data class""" data = random_data(5) test = { "int": [1, 2, 3], "float": [3.0, 4.0, 5.0], "string": ["one", "two", "three"], "bool": [True, False, True], } data.load_data_from_dict(test) assert len(data) == 3 assert (data.get_values("int") == [1, 2, 3]).all() assert (data.get_values("float") == [3.0, 4.0, 5.0]).all() assert (data.get_values("string") == ["one", "two", "three"]).all() assert (data.get_values("bool") == [True, False, True]).all() def test_get_values_data_units(): """Test get_values method of DataUnits class""" data_units = random_data_units(5, options=["option"]) assert (data_units.get_values("option") == data_units.df["option"]).all() assert ( data_units.get_values("MSR", "uV") == data_units.df["MSR"].pint.to("uV").pint.magnitude ).all() def test_get_values_data(): """Test get_values method of Data class""" data = random_data(5) assert (data.get_values("int") == data.df["int"]).all() src/qibocal/plots/t1.py METASEP import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from 
qibocal.fitting.utils import exp # T1 def t1_time_msr_phase(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = DataUnits() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="T1", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="T1", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=timerange, y=exp( timerange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig src/qibocal/plots/spectroscopies.py METASEP import os import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from qibocal.fitting.utils import lorenzian def frequency_msr_phase__fast_precision(folder, routine, qubit, format): try: data_fast = DataUnits.load_data(folder, routine, format, f"fast_sweep_q{qubit}") except: data_fast = DataUnits(quantities={"frequency": "Hz"}) try: data_precision = DataUnits.load_data( folder, routine, format, f"precision_sweep_q{qubit}" ) except: data_precision = DataUnits(quantities={"frequency": "Hz"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "popt0", "popt1", "popt2", "popt3", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("MSR", "uV"), name="Fast", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_fast.get_values("frequency", "GHz"), y=data_fast.get_values("phase", "rad"), name="Fast", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("MSR", "uV"), name="Precision", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_precision.get_values("frequency", "GHz"), y=data_precision.get_values("phase", "rad"), name="Precision", ), row=1, col=2, ) if len(data_fast) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_fast.get_values("frequency", "GHz")), max(data_fast.get_values("frequency", "GHz")), 2 * len(data_fast), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), 
data_fit.get_values("popt3"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig def frequency_attenuation_msr_phase__cut(folder, routine, qubit, format): data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") plot1d_attenuation = 30 # attenuation value to use for 1D frequency vs MSR plot fig = go.Figure() # index data on a specific attenuation value smalldf = data.df[data.get_values("attenuation", "dB") == plot1d_attenuation].copy() # split multiple software averages to different datasets datasets = [] while len(smalldf): datasets.append(smalldf.drop_duplicates("frequency")) smalldf.drop(datasets[-1].index, inplace=True) fig.add_trace( go.Scatter( x=datasets[-1]["frequency"].pint.to("GHz").pint.magnitude, y=datasets[-1]["MSR"].pint.to("V").pint.magnitude, ), ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting, xaxis_title="Frequency (GHz)", yaxis_title="MSR (V)", ) return fig def frequency_flux_msr_phase(folder, routine, qubit, format): data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Current (A)", xaxis2_title="Frequency (GHz)", yaxis2_title="Current (A)", ) return fig def frequency_flux_msr_phase__matrix(folder, routine, qubit, format): fluxes = [] for i in range(25): # FIXME: 25 is hardcoded file = f"{folder}/data/{routine}/data_q{qubit}_f{i}.csv" if os.path.exists(file): fluxes += [i] if len(fluxes) < 1: nb = 1 else: nb = len(fluxes) fig = make_subplots( rows=2, cols=nb, horizontal_spacing=0.1, vertical_spacing=0.1, x_title="Frequency (Hz)", y_title="Current (A)", shared_xaxes=True, shared_yaxes=True, ) for j in fluxes: if j == fluxes[-1]: showscale = True else: showscale = False data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}_f{j}") fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("MSR", "V"), showscale=showscale, ), row=1, col=j, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("current", "A"), z=data.get_values("phase", "rad"), showscale=showscale, ), row=2, col=j, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting ) 
return fig def frequency_attenuation_msr_phase(folder, routine, qubit, format): data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("frequency", "GHz"), y=data.get_values("attenuation", "dB"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="Attenuation (dB)", xaxis2_title="Frequency (GHz)", yaxis2_title="Attenuation (dB)", ) return fig def dispersive_frequency_msr_phase(folder, routine, qubit, formato): try: data_spec = DataUnits.load_data(folder, routine, formato, f"data_q{qubit}") except: data_spec = DataUnits(name=f"data_q{qubit}", quantities={"frequency": "Hz"}) try: data_shifted = DataUnits.load_data( folder, routine, formato, f"data_shifted_q{qubit}" ) except: data_shifted = DataUnits( name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"} ) try: data_fit = Data.load_data(folder, routine, formato, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "popt0", "popt1", "popt2", "popt3", "label1", "label2", ] ) try: data_fit_shifted = Data.load_data( folder, routine, formato, f"fit_shifted_q{qubit}" ) except: data_fit_shifted = Data( quantities=[ "popt0", "popt1", "popt2", "popt3", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data_spec.get_values("frequency", "GHz"), y=data_spec.get_values("MSR", "uV"), name="Spectroscopy", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_spec.get_values("frequency", "GHz"), y=data_spec.get_values("phase", "rad"), name="Spectroscopy", ), row=1, col=2, ) fig.add_trace( go.Scatter( x=data_shifted.get_values("frequency", "GHz"), y=data_shifted.get_values("MSR", "uV"), name="Shifted Spectroscopy", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data_shifted.get_values("frequency", "GHz"), y=data_shifted.get_values("phase", "rad"), name="Shifted Spectroscopy", ), row=1, col=2, ) # fitting traces if len(data_spec) > 0 and len(data_fit) > 0: freqrange = np.linspace( min(data_spec.get_values("frequency", "GHz")), max(data_spec.get_values("frequency", "GHz")), 2 * len(data_spec), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( freqrange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), ), name="Fit spectroscopy", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"The estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # fitting shifted traces if len(data_shifted) > 0 and len(data_fit_shifted) > 0: freqrange = np.linspace( min(data_shifted.get_values("frequency", "GHz")), max(data_shifted.get_values("frequency", "GHz")), 2 * len(data_shifted), ) params = [i for i in list(data_fit_shifted.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=freqrange, y=lorenzian( 
freqrange, data_fit_shifted.get_values("popt0"), data_fit_shifted.get_values("popt1"), data_fit_shifted.get_values("popt2"), data_fit_shifted.get_values("popt3"), ), name="Fit shifted spectroscopy", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"The estimated shifted {params[0]} is {data_fit_shifted.df[params[0]][0]:.1f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Frequency (GHz)", yaxis_title="MSR (uV)", xaxis2_title="Frequency (GHz)", yaxis2_title="Phase (rad)", ) return fig src/qibocal/plots/ramsey.py METASEP import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from qibocal.fitting.utils import ramsey # For Ramsey oscillations def time_msr(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits( name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"} ) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = DataUnits() fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=("MSR (V)",), ) fig.add_trace( go.Scatter( x=data.get_values("wait", "ns"), y=data.get_values("MSR", "uV"), name="Ramsey", ), row=1, col=1, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("wait", "ns")), max(data.get_values("wait", "ns")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=timerange, y=ramsey( timerange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), data_fit.get_values("popt4"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} Hz.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} ns", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[2]} is {data_fit.df[params[2]][0]:.3f} Hz", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", ) return fig src/qibocal/plots/rabi.py METASEP import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from qibocal.fitting.utils import rabi # For Rabi oscillations def time_msr_phase(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits(quantities={"Time": "ns"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, 
subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("Time", "ns"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: timerange = np.linspace( min(data.get_values("Time", "ns")), max(data.get_values("Time", "ns")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=timerange, y=rabi( timerange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), data_fit.get_values("popt4"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f} ns.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.1f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Time (ns)", yaxis_title="MSR (uV)", xaxis2_title="Time (ns)", yaxis2_title="Phase (rad)", ) return fig def gain_msr_phase(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits(quantities={"gain", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = Data( quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", "label1", "label2", ] ) fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("gain", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: gainrange = np.linspace( min(data.get_values("gain", "dimensionless")), max(data.get_values("gain", "dimensionless")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=gainrange, y=rabi( gainrange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), data_fit.get_values("popt4"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f} uV", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gain (dimensionless)", yaxis_title="MSR (uV)", ) return fig def amplitude_msr_phase(folder, routine, qubit, format): try: data = 
DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits(quantities={"amplitude", "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = DataUnits() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("MSR", "uV"), name="Rabi Oscillations", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("amplitude", "dimensionless"), y=data.get_values("phase", "rad"), name="Rabi Oscillations", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: amplituderange = np.linspace( min(data.get_values("amplitude", "dimensionless")), max(data.get_values("amplitude", "dimensionless")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=amplituderange, y=rabi( amplituderange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), data_fit.get_values("popt4"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.3f} uV.", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Amplitude (dimensionless)", yaxis_title="MSR (uV)", ) return fig def duration_gain_msr_phase(folder, routine, qubit, format): data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("gain", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="duration (ns)", yaxis_title="gain (dimensionless)", xaxis2_title="duration (ns)", yaxis2_title="gain (dimensionless)", ) return fig def duration_amplitude_msr_phase(folder, routine, qubit, format): data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("MSR", "V"), colorbar_x=0.45, ), row=1, col=1, ) fig.add_trace( go.Heatmap( x=data.get_values("duration", "ns"), y=data.get_values("amplitude", "dimensionless"), z=data.get_values("phase", "rad"), colorbar_x=1.0, ), row=1, col=2, ) fig.update_layout( showlegend=False, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="duration (ns)", yaxis_title="amplitude (dimensionless)", 
xaxis2_title="duration (ns)", yaxis2_title="amplitude (dimensionless)", ) return fig src/qibocal/plots/flipping.py METASEP import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from qibocal.fitting.utils import flipping # Flipping def flips_msr_phase(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits(quantities={"flips": "dimensionless"}) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = DataUnits() fig = make_subplots( rows=1, cols=2, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=( "MSR (V)", "phase (rad)", ), ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("MSR", "uV"), name="Flipping MSR", ), row=1, col=1, ) fig.add_trace( go.Scatter( x=data.get_values("flips", "dimensionless"), y=data.get_values("phase", "rad"), name="Flipping Phase", ), row=1, col=2, ) # add fitting trace if len(data) > 0 and len(data_fit) > 0: flipsrange = np.linspace( min(data.get_values("flips", "dimensionless")), max(data.get_values("flips", "dimensionless")), 2 * len(data), ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=flipsrange, y=flipping( flipsrange, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.25, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.30, showarrow=False, text=f"Estimated {params[1]} is {data_fit.df[params[1]][0]:.3f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) # last part fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Flips (dimensionless)", yaxis_title="MSR (uV)", xaxis2_title="Flips (dimensionless)", yaxis2_title="Phase (rad)", ) return fig src/qibocal/plots/calibrate_qubit_states.py METASEP import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import DataUnits # For calibrate qubit states def exc_gnd(folder, routine, qubit, format): try: data_exc = DataUnits.load_data(folder, routine, format, f"data_exc_q{qubit}") except: data_exc = DataUnits(quantities={"iteration": "dimensionless"}) fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=("Calibrate qubit states",), ) fig.add_trace( go.Scatter( x=data_exc.get_values("i", "V"), y=data_exc.get_values("q", "V"), name="exc_state", mode="markers", marker=dict(size=3, color="lightcoral"), ), row=1, col=1, ) try: data_gnd = DataUnits.load_data(folder, routine, format, f"data_gnd_q{qubit}") except: data_gnd = DataUnits(quantities={"iteration": "dimensionless"}) fig.add_trace( go.Scatter( x=data_gnd.get_values("i", "V"), y=data_gnd.get_values("q", "V"), name="gnd state", mode="markers", marker=dict(size=3, color="skyblue"), ), row=1, col=1, ) i_exc = data_exc.get_values("i", "V") q_exc = data_exc.get_values("q", "V") i_mean_exc = i_exc.mean() q_mean_exc = q_exc.mean() iq_mean_exc = complex(i_mean_exc, q_mean_exc) mod_iq_exc = abs(iq_mean_exc) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_exc], 
y=[q_mean_exc], name=f" state1_voltage: {mod_iq_exc} <br> mean_state1: {iq_mean_exc}", mode="markers", marker=dict(size=10, color="red"), ), row=1, col=1, ) i_gnd = data_gnd.get_values("i", "V") q_gnd = data_gnd.get_values("q", "V") i_mean_gnd = i_gnd.mean() q_mean_gnd = q_gnd.mean() iq_mean_gnd = complex(i_mean_gnd, q_mean_gnd) mod_iq_gnd = abs(iq_mean_gnd) * 1e6 fig.add_trace( go.Scatter( x=[i_mean_gnd], y=[q_mean_gnd], name=f" state0_voltage: {mod_iq_gnd} <br> mean_state0: {iq_mean_gnd}", mode="markers", marker=dict(size=10, color="blue"), ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="i (V)", yaxis_title="q (V)", width=1000, ) return fig src/qibocal/plots/allXY.py METASEP import numpy as np import plotly.graph_objects as go from plotly.subplots import make_subplots from qibocal.data import Data, DataUnits from qibocal.fitting.utils import cos # allXY def prob_gate(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits( quantities={"probability": "dimensionless", "gateNumber": "dimensionless"} ) fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=(f"allXY",), ) fig.add_trace( go.Scatter( x=data.get_values("gateNumber", "dimensionless"), y=data.get_values("probability", "dimensionless"), mode="markers", name="Probabilities", ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gate sequence number", yaxis_title="Z projection probability of qubit state |o>", ) return fig # allXY def prob_gate_iteration(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits( quantities={ "probability": "dimensionless", "gateNumber": "dimensionless", "beta_param": "dimensionless", } ) data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.1, vertical_spacing=0.1, subplot_titles=(f"allXY",), ) gates = len(data.get_values("gateNumber", "dimensionless")) # print(gates) import numpy as np for n in range(gates // 21): data_start = n * 21 data_end = data_start + 21 beta_param = np.array(data.get_values("beta_param", "dimensionless"))[ data_start ] gates = np.array(data.get_values("gateNumber", "dimensionless"))[ data_start:data_end ] probabilities = np.array(data.get_values("probability", "dimensionless"))[ data_start:data_end ] c = "#" + "{:06x}".format(n * 99999) fig.add_trace( go.Scatter( x=gates, y=probabilities, mode="markers+lines", line=dict(color=c), name=f"beta_parameter = {beta_param}", marker_size=16, ), row=1, col=1, ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Gate sequence number", yaxis_title="Z projection probability of qubit state |o>", ) return fig # beta param tuning def msr_beta(folder, routine, qubit, format): try: data = DataUnits.load_data(folder, routine, format, f"data_q{qubit}") except: data = DataUnits( name=f"data_q{qubit}", quantities={"beta_param": "dimensionless"} ) try: data_fit = Data.load_data(folder, routine, format, f"fit_q{qubit}") except: data_fit = DataUnits() fig = make_subplots( rows=1, cols=1, horizontal_spacing=0.01, vertical_spacing=0.01, subplot_titles=(f"beta_param_tuning",), ) c = "#6597aa" fig.add_trace( go.Scatter( x=data.get_values("beta_param", 
"dimensionless"), y=data.get_values("MSR", "uV"), line=dict(color=c), mode="markers", name="[Rx(pi/2) - Ry(pi)] - [Ry(pi/2) - Rx(pi)]", ), row=1, col=1, ) # add fitting traces if len(data) > 0 and len(data_fit) > 0: beta_param = np.linspace( min(data.get_values("beta_param", "dimensionless")), max(data.get_values("beta_param", "dimensionless")), 20, ) params = [i for i in list(data_fit.df.keys()) if "popt" not in i] fig.add_trace( go.Scatter( x=beta_param, y=cos( beta_param, data_fit.get_values("popt0"), data_fit.get_values("popt1"), data_fit.get_values("popt2"), data_fit.get_values("popt3"), ), name="Fit", line=go.scatter.Line(dash="dot"), ), row=1, col=1, ) fig.add_annotation( dict( font=dict(color="black", size=12), x=0, y=-0.20, showarrow=False, text=f"Estimated {params[0]} is {data_fit.df[params[0]][0]:.4f}", textangle=0, xanchor="left", xref="paper", yref="paper", ) ) fig.update_layout( showlegend=True, uirevision="0", # ``uirevision`` allows zooming while live plotting xaxis_title="Beta parameter", yaxis_title="MSR[uV]", ) return fig src/qibocal/plots/__init__.py METASEP from qibocal.plots.allXY import * from qibocal.plots.calibrate_qubit_states import * from qibocal.plots.flipping import * from qibocal.plots.rabi import * from qibocal.plots.ramsey import * from qibocal.plots.spectroscopies import * from qibocal.plots.t1 import * src/qibocal/fitting/utils.py METASEP import re import numpy as np def lorenzian(frequency, amplitude, center, sigma, offset): # http://openafox.com/science/peak-function-derivations.html return (amplitude / np.pi) * ( sigma / ((frequency - center) ** 2 + sigma**2) ) + offset def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(-x * p4) def exp(x, *p): return p[0] - p[1] * np.exp(-1 * x * p[2]) def flipping(x, p0, p1, p2, p3): # A fit to Flipping Qubit oscillation # Epsilon?? 
shoule be Amplitude : p[0] # Offset : p[1] # Period of oscillation : p[2] # phase for the first point corresponding to pi/2 rotation : p[3] return np.sin(x * 2 * np.pi / p2 + p3) * p0 + p1 def cos(x, p0, p1, p2, p3): # Offset : p[0] # Amplitude : p[1] # Period : p[2] # Phase : p[3] return p0 + p1 * np.cos(2 * np.pi * x / p2 + p3) def parse(key): name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) return name, unit src/qibocal/fitting/methods.py METASEP """Routine-specific method for post-processing data acquired.""" import lmfit import numpy as np from scipy.optimize import curve_fit from qibocal.config import log from qibocal.data import Data from qibocal.fitting.utils import cos, exp, flipping, lorenzian, parse, rabi, ramsey def lorentzian_fit(data, x, y, qubit, nqubits, labels, fit_file_name=None): """Fitting routine for resonator spectroscopy""" if fit_file_name == None: data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[1], labels[0], ], ) else: data_fit = Data( name=fit_file_name + f"_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[1], labels[0], ], ) frequencies = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) # Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(lorenzian) # Guess parameters for Lorentzian max or min if (nqubits == 1 and labels[0] == "resonator_freq") or ( nqubits != 1 and labels[0] == "qubit_freq" ): guess_center = frequencies[ np.argmax(voltages) ] # Argmax = Returns the indices of the maximum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmin(voltages)] - guess_center) guess_amp = (np.max(voltages) - guess_offset) * guess_sigma * np.pi else: guess_center = frequencies[ np.argmin(voltages) ] # Argmin = Returns the indices of the minimum values along an axis. guess_offset = np.mean( voltages[np.abs(voltages - np.mean(voltages) < np.std(voltages))] ) guess_sigma = abs(frequencies[np.argmax(voltages)] - guess_center) guess_amp = (np.min(voltages) - guess_offset) * guess_sigma * np.pi # Add guessed parameters to the model model_Q.set_param_hint("center", value=guess_center, vary=True) model_Q.set_param_hint("sigma", value=guess_sigma, vary=True) model_Q.set_param_hint("amplitude", value=guess_amp, vary=True) model_Q.set_param_hint("offset", value=guess_offset, vary=True) guess_parameters = model_Q.make_params() # fit the model with the data and guessed parameters try: fit_res = model_Q.fit( data=voltages, frequency=frequencies, params=guess_parameters ) except: log.warning("The fitting was not successful") return data_fit # get the values for postprocessing and for legend. 
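# Quantities derived from the fit in the lines below:
#   f0 = fitted Lorentzian center, BW = 2*sigma (full width), Q = |f0 / BW|,
#   peak_voltage = value at the center, amplitude / (pi * sigma) + offset.
# The center is rescaled by 1e9, assuming the sweep was stored in GHz.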
f0 = fit_res.best_values["center"] BW = fit_res.best_values["sigma"] * 2 Q = abs(f0 / BW) peak_voltage = ( fit_res.best_values["amplitude"] / (fit_res.best_values["sigma"] * np.pi) + fit_res.best_values["offset"] ) freq = f0 * 1e9 data_fit.add( { labels[1]: peak_voltage, labels[0]: freq, "popt0": fit_res.best_values["amplitude"], "popt1": fit_res.best_values["center"], "popt2": fit_res.best_values["sigma"], "popt3": fit_res.best_values["offset"], } ) return data_fit def rabi_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 0.1e-6, ] else: pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmax(voltages.values)], np.pi / 2, 0.1e-6, ] try: popt, pcov = curve_fit( rabi, time.values, voltages.values, p0=pguess, maxfev=10000 ) smooth_dataset = rabi(time.values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) pi_pulse_max_voltage = smooth_dataset.max() t2 = 1.0 / popt[4] # double check T1 except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: pi_pulse_duration, labels[1]: pi_pulse_max_voltage, } ) return data_fit def ramsey_fit(data, x, y, qubit, qubit_freq, sampling_rate, offset_freq, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", "popt4", labels[0], labels[1], labels[2], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ np.mean(voltages.values), np.max(voltages.values) - np.min(voltages.values), 0.5 / time.values[np.argmin(voltages.values)], np.pi / 2, 500e-9, ] try: popt, pcov = curve_fit( ramsey, time.values, voltages.values, p0=pguess, maxfev=2000000 ) delta_fitting = popt[2] delta_phys = int((delta_fitting * sampling_rate) - offset_freq) corrected_qubit_frequency = int(qubit_freq + delta_phys) t2 = 1.0 / popt[4] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], "popt4": popt[4], labels[0]: delta_phys, labels[1]: corrected_qubit_frequency, labels[2]: t2, } ) return data_fit def t1_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", labels[0], ], ) time = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [ max(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] else: pguess = [ min(voltages.values), (max(voltages.values) - min(voltages.values)), 1 / 250, ] try: popt, pcov = curve_fit( exp, time.values, voltages.values, p0=pguess, maxfev=2000000 ) t1 = abs(1 / popt[2]) except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], labels[0]: t1, } ) return data_fit def flipping_fit(data, x, y, qubit, nqubits, niter, pi_pulse_amplitude, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], labels[1], ], ) flips = data.get_values(*parse(x)) # Check X data stores. N flips or i? 
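# The MSR-vs-flips curve is fit to the `flipping` sinusoid; its period popt[2]
# is then converted into an angle error `epsilon` and a corrected pi-pulse
# amplitude in the lines that follow.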
voltages = data.get_values(*parse(y)) if nqubits == 1: pguess = [0.0003, np.mean(voltages), -18, 0] # epsilon guess parameter else: pguess = [0.0003, np.mean(voltages), 18, 0] # epsilon guess parameter try: popt, pcov = curve_fit(flipping, flips, voltages, p0=pguess, maxfev=2000000) epsilon = -np.pi / popt[2] amplitude_delta = np.pi / (np.pi + epsilon) corrected_amplitude = amplitude_delta * pi_pulse_amplitude # angle = (niter * 2 * np.pi / popt[2] + popt[3]) / (1 + 4 * niter) # amplitude_delta = angle * 2 / np.pi * pi_pulse_amplitude except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: amplitude_delta, labels[1]: corrected_amplitude, } ) return data_fit def drag_tunning_fit(data, x, y, qubit, nqubits, labels): data_fit = Data( name=f"fit_q{qubit}", quantities=[ "popt0", "popt1", "popt2", "popt3", labels[0], ], ) beta_params = data.get_values(*parse(x)) voltages = data.get_values(*parse(y)) pguess = [ 0, # Offset: p[0] beta_params.values[np.argmax(voltages)] - beta_params.values[np.argmin(voltages)], # Amplitude: p[1] 4, # Period: p[2] 0.3, # Phase: p[3] ] try: popt, pcov = curve_fit(cos, beta_params.values, voltages.values) smooth_dataset = cos(beta_params.values, popt[0], popt[1], popt[2], popt[3]) beta_optimal = beta_params.values[np.argmin(smooth_dataset)] except: log.warning("The fitting was not succesful") return data_fit data_fit.add( { "popt0": popt[0], "popt1": popt[1], "popt2": popt[2], "popt3": popt[3], labels[0]: beta_optimal, } ) return data_fit src/qibocal/fitting/__init__.py METASEP src/qibocal/cli/builders.py METASEP import datetime import inspect import os import shutil import yaml from qibocal import calibrations from qibocal.config import log, raise_error from qibocal.data import Data def load_yaml(path): """Load yaml file from disk.""" with open(path) as file: data = yaml.safe_load(file) return data class ActionBuilder: """Class for parsing and executing runcards. Args: runcard (path): path containing the runcard. folder (path): path for the output folder. force (bool): option to overwrite the output folder if it exists already. """ def __init__(self, runcard, folder=None, force=False): path, self.folder = self._generate_output_folder(folder, force) self.runcard = load_yaml(runcard) # Qibolab default backend if not provided in runcard. backend_name = self.runcard.get("backend", "qibolab") platform_name = self.runcard.get("platform", "dummy") self.backend, self.platform = self._allocate_backend( backend_name, platform_name, path ) self.qubits = self.runcard["qubits"] self.format = self.runcard["format"] # Saving runcard shutil.copy(runcard, f"{path}/runcard.yml") self.save_meta(path, self.folder) @staticmethod def _generate_output_folder(folder, force): """Static method for generating the output folder. Args: folder (path): path for the output folder. If None it will be created a folder automatically force (bool): option to overwrite the output folder if it exists already. 
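Returns:
    path (path): absolute path of the created output folder.
    folder (str): name of the output folder.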
""" if folder is None: import getpass e = datetime.datetime.now() user = getpass.getuser().replace(".", "-") date = e.strftime("%Y-%m-%d") folder = f"{date}-{'000'}-{user}" num = 0 while os.path.exists(folder): log.info(f"Directory {folder} already exists.") num += 1 folder = f"{date}-{str(num).rjust(3, '0')}-{user}" log.info(f"Trying to create directory {folder}") elif os.path.exists(folder) and not force: raise_error(RuntimeError, f"Directory {folder} already exists.") elif os.path.exists(folder) and force: log.warning(f"Deleting previous directory {folder}.") shutil.rmtree(os.path.join(os.getcwd(), folder)) path = os.path.join(os.getcwd(), folder) log.info(f"Creating directory {folder}.") os.makedirs(path) return path, folder def _allocate_backend(self, backend_name, platform_name, path): """Allocate the platform using Qibolab.""" from qibo.backends import GlobalBackend, set_backend if backend_name == "qibolab": from qibolab.paths import qibolab_folder original_runcard = qibolab_folder / "runcards" / f"{platform_name}.yml" # copy of the original runcard that will stay unmodified shutil.copy(original_runcard, f"{path}/platform.yml") # copy of the original runcard that will be modified during calibration updated_runcard = f"{self.folder}/new_platform.yml" shutil.copy(original_runcard, updated_runcard) # allocate backend with updated_runcard set_backend( backend=backend_name, platform=platform_name, runcard=updated_runcard ) backend = GlobalBackend() return backend, backend.platform else: set_backend(backend=backend_name, platform=platform_name) backend = GlobalBackend() return backend, None def save_meta(self, path, folder): import qibocal e = datetime.datetime.now(datetime.timezone.utc) meta = {} meta["title"] = folder meta["backend"] = str(self.backend) meta["platform"] = str(self.backend.platform) meta["date"] = e.strftime("%Y-%m-%d") meta["start-time"] = e.strftime("%H:%M:%S") meta["end-time"] = e.strftime("%H:%M:%S") meta["versions"] = self.backend.versions # pylint: disable=E1101 meta["versions"]["qibocal"] = qibocal.__version__ with open(f"{path}/meta.yml", "w") as file: yaml.dump(meta, file) def _build_single_action(self, name): """Helper method to parse the actions in the runcard.""" f = getattr(calibrations, name) path = os.path.join(self.folder, f"data/{name}/") os.makedirs(path) sig = inspect.signature(f) params = self.runcard["actions"][name] for param in list(sig.parameters)[2:-1]: if param not in params: raise_error(AttributeError, f"Missing parameter {param} in runcard.") if f.__annotations__["qubit"] == int: single_qubit_action = True else: single_qubit_action = False return f, params, path, single_qubit_action def execute(self): """Method to execute sequentially all the actions in the runcard.""" if self.platform is not None: self.platform.connect() self.platform.setup() self.platform.start() for action in self.runcard["actions"]: routine, args, path, single_qubit_action = self._build_single_action(action) self._execute_single_action(routine, args, path, single_qubit_action) if self.platform is not None: self.platform.stop() self.platform.disconnect() def _execute_single_action(self, routine, arguments, path, single_qubit): """Method to execute a single action and retrieving the results.""" if self.format is None: raise_error(ValueError, f"Cannot store data using {self.format} format.") if single_qubit: for qubit in self.qubits: results = routine(self.platform, qubit, **arguments) for data in results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: 
self.update_platform_runcard(qubit, routine.__name__) else: results = routine(self.platform, self.qubits, **arguments) for data in results: getattr(data, f"to_{self.format}")(path) if self.platform is not None: self.update_platform_runcard(qubit, routine.__name__) def update_platform_runcard(self, qubit, routine): try: data_fit = Data.load_data( self.folder, routine, self.format, f"fit_q{qubit}" ) except: data_fit = Data() params = [i for i in list(data_fit.df.keys()) if "popt" not in i] settings = load_yaml(f"{self.folder}/new_platform.yml") for param in params: settings["characterization"]["single_qubit"][qubit][param] = int( data_fit.get_values(param) ) with open(f"{self.folder}/new_platform.yml", "w") as file: yaml.dump( settings, file, sort_keys=False, indent=4, default_flow_style=None ) def dump_report(self): from qibocal.web.report import create_report # update end time meta = load_yaml(f"{self.folder}/meta.yml") e = datetime.datetime.now(datetime.timezone.utc) meta["end-time"] = e.strftime("%H:%M:%S") with open(f"{self.folder}/meta.yml", "w") as file: yaml.dump(meta, file) create_report(self.folder) class ReportBuilder: """Parses routines and plots to report and live plotting page. Args: path (str): Path to the data folder to generate report for. """ def __init__(self, path): self.path = path self.metadata = load_yaml(os.path.join(path, "meta.yml")) # find proper path title base, self.title = os.path.join(os.getcwd(), path), "" while self.title in ("", "."): base, self.title = os.path.split(base) self.runcard = load_yaml(os.path.join(path, "runcard.yml")) self.format = self.runcard.get("format") self.qubits = self.runcard.get("qubits") # create calibration routine objects # (could be incorporated to :meth:`qibocal.cli.builders.ActionBuilder._build_single_action`) self.routines = [] for action in self.runcard.get("actions"): if hasattr(calibrations, action): routine = getattr(calibrations, action) else: raise_error(ValueError, f"Undefined action {action} in report.") if not hasattr(routine, "plots"): routine.plots = [] self.routines.append(routine) def get_routine_name(self, routine): """Prettify routine's name for report headers.""" return routine.__name__.replace("_", " ").title() def get_figure(self, routine, method, qubit): """Get html figure for report. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. """ import tempfile figure = method(self.path, routine.__name__, qubit, self.format) with tempfile.NamedTemporaryFile() as temp: figure.write_html(temp.name, include_plotlyjs=False, full_html=False) fightml = temp.read().decode("utf-8") return fightml def get_live_figure(self, routine, method, qubit): """Get url to dash page for live plotting. This url is used by :meth:`qibocal.web.app.get_graph`. Args: routine (Callable): Calibration method. method (Callable): Plot method. qubit (int): Qubit id. 
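Returns:
    str: relative URL (plot method / data path / routine / qubit / format) consumed by the live-plotting page.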
""" return os.path.join( method.__name__, self.path, routine.__name__, str(qubit), self.format, ) src/qibocal/cli/_base.py METASEP """Adds global CLI options.""" import base64 import pathlib import shutil import socket import subprocess import uuid from urllib.parse import urljoin import click from qibo.config import log, raise_error from qibocal.cli.builders import ActionBuilder CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) # options for report upload UPLOAD_HOST = ( "qibocal@localhost" if socket.gethostname() == "saadiyat" else "[email protected]" ) TARGET_DIR = "qibocal-reports/" ROOT_URL = "http://login.qrccluster.com:9000/" @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("runcard", metavar="RUNCARD", type=click.Path(exists=True)) @click.option( "folder", "-o", type=click.Path(), help="Output folder. If not provided a standard name will generated.", ) @click.option( "force", "-f", is_flag=True, help="Use --force option to overwrite the output folder.", ) def command(runcard, folder, force=None): """qibocal: Quantum Calibration Verification and Validation using Qibo. Arguments: - RUNCARD: runcard with declarative inputs. """ builder = ActionBuilder(runcard, folder, force) builder.execute() builder.dump_report() @click.command(context_settings=CONTEXT_SETTINGS) @click.option( "port", "-p", "--port", default=8050, type=int, help="Localhost port to launch dash server.", ) @click.option( "debug", "-d", "--debug", is_flag=True, help="Launch server in debugging mode.", ) def live_plot(port, debug): """Real time plotting of calibration data on a dash server.""" import socket from qibocal.web.app import app # change port if it is already used while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: if s.connect_ex(("localhost", port)) != 0: break port += 1 app.run_server(debug=debug, port=port) @click.command(context_settings=CONTEXT_SETTINGS) @click.argument("output_folder", metavar="FOLDER", type=click.Path(exists=True)) def upload(output_folder): """Uploads output folder to server""" output_path = pathlib.Path(output_folder) # check the rsync command exists. if not shutil.which("rsync"): raise_error( RuntimeError, "Could not find the rsync command. Please make sure it is installed.", ) # check that we can authentica with a certificate ssh_command_line = ( "ssh", "-o", "PreferredAuthentications=publickey", "-q", UPLOAD_HOST, "exit", ) str_line = " ".join(repr(ele) for ele in ssh_command_line) log.info(f"Checking SSH connection to {UPLOAD_HOST}.") try: subprocess.run(ssh_command_line, check=True) except subprocess.CalledProcessError as e: raise RuntimeError( ( "Could not validate the SSH key. " "The command\n%s\nreturned a non zero exit status. " "Please make sure that your public SSH key is on the server." ) % str_line ) from e except OSError as e: raise RuntimeError( "Could not run the command\n{}\n: {}".format(str_line, e) ) from e log.info("Connection seems OK.") # upload output randname = base64.urlsafe_b64encode(uuid.uuid4().bytes).decode() newdir = TARGET_DIR + randname rsync_command = ( "rsync", "-aLz", "--chmod=ug=rwx,o=rx", f"{output_path}/", f"{UPLOAD_HOST}:{newdir}", ) log.info(f"Uploading output ({output_path}) to {UPLOAD_HOST}") try: subprocess.run(rsync_command, check=True) except subprocess.CalledProcessError as e: msg = f"Failed to upload output: {e}" raise RuntimeError(msg) from e url = urljoin(ROOT_URL, randname) log.info(f"Upload completed. 
The result is available at:\n{url}") src/qibocal/cli/__init__.py METASEP """CLI entry point.""" from ._base import command, live_plot, upload src/qibocal/calibrations/characterization/utils.py METASEP import numpy as np def variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ): """Helper function for sweeps.""" return np.concatenate( ( np.arange(-lowres_width, -highres_width, lowres_step), np.arange(-highres_width, highres_width, highres_step), np.arange(highres_width, lowres_width, lowres_step), ) ) src/qibocal/calibrations/characterization/t1.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import t1_fit @plot("MSR vs Time", plots.t1_time_msr_phase) def t1( platform: AbstractPlatform, qubit: int, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step, software_averages, points=10, ): sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) ro_wait_range = np.arange( delay_before_readout_start, delay_before_readout_end, delay_before_readout_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"Time": "ns"}) count = 0 for _ in range(software_averages): for wait in ro_wait_range: if count % points == 0 and count > 0: yield data yield t1_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["t1"], ) ro_pulse.start = qd_pulse.duration + wait msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": wait, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/resonator_spectroscopy.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.calibrations.characterization.utils import variable_resolution_scanrange from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def resonator_spectroscopy( platform: AbstractPlatform, qubit: int, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( variable_resolution_scanrange( lowres_width, lowres_step, highres_width, highres_step ) + resonator_frequency ) fast_sweep_data = DataUnits( name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield fast_sweep_data yield lorentzian_fit( 
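# Every `points` acquisitions the routine yields the data gathered so far plus a
# refreshed Lorentzian fit, so the results can be live-plotted while the sweep
# is still running.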
fast_sweep_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } fast_sweep_data.add(results) count += 1 yield fast_sweep_data if platform.resonator_type == "3D": resonator_frequency = fast_sweep_data.get_values("frequency", "Hz")[ np.argmax(fast_sweep_data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( fast_sweep_data.get_values("MSR", "V")[: (lowres_width // lowres_step)] ) * 1e6 ) else: resonator_frequency = fast_sweep_data.get_values("frequency", "Hz")[ np.argmin(fast_sweep_data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( fast_sweep_data.get_values("MSR", "V")[: (lowres_width // lowres_step)] ) * 1e6 ) precision_sweep__data = DataUnits( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(-precision_width, precision_width, precision_step) + resonator_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield precision_sweep__data yield lorentzian_fit( fast_sweep_data + precision_sweep__data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } precision_sweep__data.add(results) count += 1 yield precision_sweep__data @plot("Frequency vs Attenuation", plots.frequency_attenuation_msr_phase) @plot("MSR vs Frequency", plots.frequency_attenuation_msr_phase__cut) def resonator_punchout( platform: AbstractPlatform, qubit: int, freq_width, freq_step, min_att, max_att, step_att, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"frequency": "Hz", "attenuation": "dB"} ) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence = PulseSequence() sequence.add(ro_pulse) # TODO: move this explicit instruction to the platform resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency - (freq_width / 4) ) attenuation_range = np.flip(np.arange(min_att, max_att, step_att)) count = 0 for _ in range(software_averages): for att in attenuation_range: for freq in frequency_range: if count % points == 0: yield data # TODO: move these explicit instructions to the platform platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.ro_port[qubit].attenuation = att msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr * (np.exp(att / 10)), "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "attenuation[dB]": att, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Flux Current", plots.frequency_flux_msr_phase) def resonator_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline=0, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit 
sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) data = DataUnits( name=f"data_q{qubit}", quantities={"frequency": "Hz", "current": "A"} ) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data # TODO: automatically extract the sweet spot current # TODO: add a method to generate the matrix @plot("MSR row 1 and Phase row 2", plots.frequency_flux_msr_phase__matrix) def resonator_spectroscopy_flux_matrix( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_min, current_max, current_step, fluxlines, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) current_range = np.arange(current_min, current_max, current_step) count = 0 for fluxline in fluxlines: fluxline = int(fluxline) print(fluxline) data = DataUnits( name=f"data_q{qubit}_f{fluxline}", quantities={"frequency": "Hz", "current": "A"}, ) for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data @plot("MSR and Phase vs Frequency", plots.dispersive_frequency_msr_phase) def dispersive_shift( platform: AbstractPlatform, qubit: int, freq_width, freq_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) sequence.add(ro_pulse) resonator_frequency = platform.characterization["single_qubit"][qubit][ "resonator_freq" ] frequency_range = ( np.arange(-freq_width, freq_width, freq_step) + resonator_frequency ) data_spec = DataUnits(name=f"data_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_spec yield lorentzian_fit( data_spec, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, 
"i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_spec.add(results) count += 1 yield data_spec # Shifted Spectroscopy sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.finish) sequence.add(RX_pulse) sequence.add(ro_pulse) data_shifted = DataUnits( name=f"data_shifted_q{qubit}", quantities={"frequency": "Hz"} ) count = 0 for _ in range(software_averages): for freq in frequency_range: if count % points == 0 and count > 0: yield data_shifted yield lorentzian_fit( data_shifted, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["resonator_freq", "peak_voltage"], fit_file_name="fit_shifted", ) platform.ro_port[qubit].lo_frequency = freq - ro_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data_shifted.add(results) count += 1 yield data_shifted src/qibocal/calibrations/characterization/ramsey.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import ramsey_fit @plot("MSR vs Time", plots.time_msr) def ramsey_frequency_detuned( platform: AbstractPlatform, qubit: int, t_start, t_end, t_step, n_osc, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate data = DataUnits(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) runcard_qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] runcard_T2 = platform.characterization["single_qubit"][qubit]["T2"] intermediate_freq = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "frequency" ] current_qubit_freq = runcard_qubit_freq current_T2 = runcard_T2 # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) t_end = np.array(t_end) for t_max in t_end: count = 0 platform.qd_port[qubit].lo_frequency = current_qubit_freq - intermediate_freq offset_freq = n_osc / t_max * sampling_rate # Hz t_range = np.arange(t_start, t_max, t_step) for wait in t_range: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait RX90_pulse2.relative_phase = ( (RX90_pulse2.start / sampling_rate) * (2 * np.pi) * (-offset_freq) ) ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "wait[ns]": wait, "t_max[ns]": t_max, } data.add(results) count += 1 # # Fitting data_fit = 
ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=current_qubit_freq, sampling_rate=sampling_rate, offset_freq=offset_freq, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) new_t2 = data_fit.get_values("t2") corrected_qubit_freq = data_fit.get_values("corrected_qubit_frequency") # if ((new_t2 * 3.5) > t_max): if (new_t2 > current_T2).bool() and len(t_end) > 1: current_qubit_freq = int(corrected_qubit_freq) current_T2 = new_t2 data = DataUnits( name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"} ) else: corrected_qubit_freq = int(current_qubit_freq) new_t2 = current_T2 break yield data @plot("MSR vs Time", plots.time_msr) def ramsey( platform: AbstractPlatform, qubit: int, delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, software_averages, points=10, ): platform.reload_settings() sampling_rate = platform.sampling_rate qubit_freq = platform.characterization["single_qubit"][qubit]["qubit_freq"] RX90_pulse1 = platform.create_RX90_pulse(qubit, start=0) RX90_pulse2 = platform.create_RX90_pulse(qubit, start=RX90_pulse1.finish) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX90_pulse2.finish) sequence = PulseSequence() sequence.add(RX90_pulse1) sequence.add(RX90_pulse2) sequence.add(ro_pulse) waits = np.arange( delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step, ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX90_pulse1.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"wait": "ns", "t_max": "ns"}) count = 0 for _ in range(software_averages): for wait in waits: if count % points == 0 and count > 0: yield data yield ramsey_fit( data, x="wait[ns]", y="MSR[uV]", qubit=qubit, qubit_freq=qubit_freq, sampling_rate=sampling_rate, offset_freq=0, labels=[ "delta_frequency", "corrected_qubit_frequency", "t2", ], ) RX90_pulse2.start = RX90_pulse1.finish + wait ro_pulse.start = RX90_pulse2.finish msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "wait[ns]": wait, "t_max[ns]": delay_between_pulses_end, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/rabi_oscillations.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import rabi_fit @plot("MSR vs Time", plots.time_msr_phase) def rabi_pulse_length( platform: AbstractPlatform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"Time": "ns"}) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - 
ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="Time[ns]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_duration", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "Time[ns]": duration, } data.add(results) count += 1 yield data @plot("MSR vs Gain", plots.gain_msr_phase) def rabi_pulse_gain( platform: AbstractPlatform, qubit: int, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"gain": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.finish) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="gain[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_gain", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs Amplitude", plots.amplitude_msr_phase) def rabi_pulse_amplitude( platform, qubit: int, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits(name=f"data_q{qubit}", quantities={"amplitude": "dimensionless"}) sequence = PulseSequence() qd_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=qd_pulse.duration) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0 and count > 0: yield data yield rabi_fit( data, x="amplitude[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "pi_pulse_amplitude", "pi_pulse_max_voltage", ], ) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": 
q, "phase[rad]": phase, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data @plot("MSR vs length and gain", plots.duration_gain_msr_phase) def rabi_pulse_length_and_gain( platform: AbstractPlatform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_gain_start, pulse_gain_end, pulse_gain_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"duration": "ns", "gain": "dimensionless"} ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_gain_range = np.arange(pulse_gain_start, pulse_gain_end, pulse_gain_step) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for gain in qd_pulse_gain_range: platform.qd_port[qubit].gain = gain if count % points == 0 and count > 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "gain[dimensionless]": gain, } data.add(results) count += 1 yield data @plot("MSR vs length and amplitude", plots.duration_amplitude_msr_phase) def rabi_pulse_length_and_amplitude( platform, qubit: int, pulse_duration_start, pulse_duration_end, pulse_duration_step, pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step, software_averages, points=10, ): platform.reload_settings() data = DataUnits( name=f"data_q{qubit}", quantities={"duration": "ns", "amplitude": "dimensionless"}, ) sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=4) sequence.add(qd_pulse) sequence.add(ro_pulse) qd_pulse_duration_range = np.arange( pulse_duration_start, pulse_duration_end, pulse_duration_step ) qd_pulse_amplitude_range = np.arange( pulse_amplitude_start, pulse_amplitude_end, pulse_amplitude_step ) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse.frequency ) count = 0 for _ in range(software_averages): for duration in qd_pulse_duration_range: qd_pulse.duration = duration ro_pulse.start = duration for amplitude in qd_pulse_amplitude_range: qd_pulse.amplitude = amplitude if count % points == 0: yield data msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "duration[ns]": duration, "amplitude[dimensionless]": amplitude, } data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/qubit_spectroscopy.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses 
import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import lorentzian_fit @plot("MSR and Phase vs Frequency", plots.frequency_msr_phase__fast_precision) def qubit_spectroscopy( platform: AbstractPlatform, qubit: int, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step, software_averages, points=10, ): platform.reload_settings() sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] freqrange = np.arange(fast_start, fast_end, fast_step) + qubit_frequency data = DataUnits(quantities={"frequency": "Hz", "attenuation": "dB"}) # FIXME: Waiting for Qblox platform to take care of that platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) data = DataUnits(name=f"fast_sweep_q{qubit}", quantities={"frequency": "Hz"}) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield data yield lorentzian_fit( data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } data.add(results) count += 1 yield data if platform.resonator_type == "3D": qubit_frequency = data.get_values("frequency", "Hz")[ np.argmin(data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( data.get_values("MSR", "V")[: ((fast_end - fast_start) // fast_step)] ) * 1e6 ) else: qubit_frequency = data.get_values("frequency", "Hz")[ np.argmax(data.get_values("MSR", "V")) ] avg_voltage = ( np.mean( data.get_values("MSR", "V")[: ((fast_end - fast_start) // fast_step)] ) * 1e6 ) prec_data = DataUnits( name=f"precision_sweep_q{qubit}", quantities={"frequency": "Hz"} ) freqrange = ( np.arange(precision_start, precision_end, precision_step) + qubit_frequency ) count = 0 for _ in range(software_averages): for freq in freqrange: if count % points == 0 and count > 0: yield prec_data yield lorentzian_fit( data + prec_data, x="frequency[GHz]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=["qubit_freq", "peak_voltage"], ) platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, } prec_data.add(results) count += 1 yield prec_data # TODO: Estimate avg_voltage correctly @plot("MSR and Phase vs Frequency", plots.frequency_flux_msr_phase) def qubit_spectroscopy_flux( platform: AbstractPlatform, qubit: int, freq_width, freq_step, current_max, current_min, current_step, software_averages, fluxline, points=10, ): platform.reload_settings() if fluxline == "qubit": fluxline = qubit sequence = PulseSequence() qd_pulse = platform.create_qubit_drive_pulse(qubit, start=0, duration=5000) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=5000) sequence.add(qd_pulse) sequence.add(ro_pulse) data = DataUnits( name=f"data_q{qubit}", quantities={"frequency": "Hz", 
"current": "A"} ) qubit_frequency = platform.characterization["single_qubit"][qubit]["qubit_freq"] qubit_biasing_current = platform.characterization["single_qubit"][qubit][ "sweetspot" ] frequency_range = np.arange(-freq_width, freq_width, freq_step) + qubit_frequency current_range = ( np.arange(current_min, current_max, current_step) + qubit_biasing_current ) count = 0 for _ in range(software_averages): for curr in current_range: for freq in frequency_range: if count % points == 0: yield data platform.qd_port[qubit].lo_frequency = freq - qd_pulse.frequency platform.qf_port[fluxline].current = curr msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "frequency[Hz]": freq, "current[A]": curr, } # TODO: implement normalization data.add(results) count += 1 yield data src/qibocal/calibrations/characterization/flipping.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import flipping_fit @plot("MSR vs Flips", plots.flips_msr_phase) def flipping( platform: AbstractPlatform, qubit: int, niter, step, points=10, ): platform.reload_settings() pi_pulse_amplitude = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "amplitude" ] data = DataUnits(name=f"data_q{qubit}", quantities={"flips": "dimensionless"}) sequence = PulseSequence() RX90_pulse = platform.create_RX90_pulse(qubit, start=0) count = 0 # repeat N iter times for n in range(0, niter, step): if count % points == 0 and count > 0: yield data yield flipping_fit( data, x="flips[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], niter=niter, pi_pulse_amplitude=pi_pulse_amplitude, labels=["amplitude_delta", "corrected_amplitude"], ) sequence.add(RX90_pulse) # execute sequence RX(pi/2) - [RX(pi) - RX(pi)] from 0...n times - RO start1 = RX90_pulse.duration for j in range(n): RX_pulse1 = platform.create_RX_pulse(qubit, start=start1) start2 = start1 + RX_pulse1.duration RX_pulse2 = platform.create_RX_pulse(qubit, start=start2) sequence.add(RX_pulse1) sequence.add(RX_pulse2) start1 = start2 + RX_pulse2.duration # add ro pulse at the end of the sequence ro_pulse = platform.create_qubit_readout_pulse(qubit, start=start1) sequence.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(sequence)[ro_pulse.serial] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "flips[dimensionless]": n, } data.add(results) count += 1 sequence = PulseSequence() yield data src/qibocal/calibrations/characterization/calibrate_qubit_states.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot @plot("exc vs gnd", plots.exc_gnd) def calibrate_qubit_states( platform: AbstractPlatform, qubit: int, niter, points=10, ): # create exc sequence exc_sequence = PulseSequence() RX_pulse = platform.create_RX_pulse(qubit, start=0) ro_pulse = platform.create_qubit_readout_pulse(qubit, start=RX_pulse.duration) exc_sequence.add(RX_pulse) exc_sequence.add(ro_pulse) # FIXME: Waiting to be able to pass qpucard to qibolab platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse.frequency ) 
platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - RX_pulse.frequency ) data_exc = DataUnits(name=f"data_exc_q{qubit}", quantities={"iteration": "s"}) count = 0 for n in np.arange(niter): if count % points == 0: yield data_exc msr, phase, i, q = platform.execute_pulse_sequence(exc_sequence, nshots=1)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "iteration[s]": n, } data_exc.add(results) count += 1 yield data_exc gnd_sequence = PulseSequence() ro_pulse = platform.create_qubit_readout_pulse(qubit, start=0) gnd_sequence.add(ro_pulse) data_gnd = DataUnits(name=f"data_gnd_q{qubit}", quantities={"iteration": "s"}) count = 0 for n in np.arange(niter): if count % points == 0: yield data_gnd msr, phase, i, q = platform.execute_pulse_sequence(gnd_sequence, nshots=1)[ ro_pulse.serial ] results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[deg]": phase, "iteration[s]": n, } data_gnd.add(results) count += 1 yield data_gnd src/qibocal/calibrations/characterization/allXY.py METASEP import numpy as np from qibolab.platforms.abstract import AbstractPlatform from qibolab.pulses import PulseSequence from qibocal import plots from qibocal.data import DataUnits from qibocal.decorators import plot from qibocal.fitting.methods import drag_tunning_fit # allXY rotations gatelist = [ ["I", "I"], ["RX(pi)", "RX(pi)"], ["RY(pi)", "RY(pi)"], ["RX(pi)", "RY(pi)"], ["RY(pi)", "RX(pi)"], ["RX(pi/2)", "I"], ["RY(pi/2)", "I"], ["RX(pi/2)", "RY(pi/2)"], ["RY(pi/2)", "RX(pi/2)"], ["RX(pi/2)", "RY(pi)"], ["RY(pi/2)", "RX(pi)"], ["RX(pi)", "RY(pi/2)"], ["RY(pi)", "RX(pi/2)"], ["RX(pi/2)", "RX(pi)"], ["RX(pi)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi)"], ["RY(pi)", "RY(pi/2)"], ["RX(pi)", "I"], ["RY(pi)", "I"], ["RX(pi/2)", "RX(pi/2)"], ["RY(pi/2)", "RY(pi/2)"], ] @plot("Prob vs gate sequence", plots.prob_gate) def allXY( platform: AbstractPlatform, qubit: int, beta_param=None, software_averages=1, points=10, ): state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = DataUnits( name=f"data_q{qubit}", quantities={"probability": "dimensionless", "gateNumber": "dimensionless"}, ) # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) count = 0 for _ in range(software_averages): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=2048)[ ro_pulse.serial ] prob = np.abs(msr * 1e6 - state1_voltage) / np.abs( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": gateNumber, } data.add(results) count += 1 gateNumber += 1 yield data @plot("Prob vs gate sequence", plots.prob_gate_iteration) def allXY_iteration( platform: AbstractPlatform, qubit: int, beta_start, beta_end, 
beta_step, software_averages=1, points=10, ): # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) state0_voltage = complex( platform.characterization["single_qubit"][qubit]["state0_voltage"] ) state1_voltage = complex( platform.characterization["single_qubit"][qubit]["state1_voltage"] ) data = DataUnits( name=f"data_q{qubit}", quantities={ "probability": "dimensionless", "gateNumber": "dimensionless", "beta_param": "dimensionless", }, ) count = 0 for _ in range(software_averages): for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): gateNumber = 1 for gates in gatelist: if count % points == 0 and count > 0: yield data seq, ro_pulse = _get_sequence_from_gate_pair( platform, gates, qubit, beta_param ) seq.add(ro_pulse) msr, phase, i, q = platform.execute_pulse_sequence(seq, nshots=1024)[ ro_pulse.serial ] prob = np.abs(msr * 1e6 - state1_voltage) / np.abs( state1_voltage - state0_voltage ) prob = (2 * prob) - 1 results = { "MSR[V]": msr, "i[V]": i, "q[V]": q, "phase[rad]": phase, "probability[dimensionless]": prob, "gateNumber[dimensionless]": gateNumber, "beta_param[dimensionless]": beta_param, } data.add(results) count += 1 gateNumber += 1 yield data @plot("MSR vs beta parameter", plots.msr_beta) def drag_pulse_tunning( platform: AbstractPlatform, qubit: int, beta_start, beta_end, beta_step, points=10, ): # platform.reload_settings() # FIXME: Waiting to be able to pass qpucard to qibolab ro_pulse_test = platform.create_qubit_readout_pulse(qubit, start=4) platform.ro_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["resonator_freq"] - ro_pulse_test.frequency ) qd_pulse_test = platform.create_qubit_drive_pulse(qubit, start=0, duration=4) platform.qd_port[qubit].lo_frequency = ( platform.characterization["single_qubit"][qubit]["qubit_freq"] - qd_pulse_test.frequency ) data = DataUnits(name=f"data_q{qubit}", quantities={"beta_param": "dimensionless"}) count = 0 for beta_param in np.arange(beta_start, beta_end, beta_step).round(4): if count % points == 0 and count > 0: yield data yield drag_tunning_fit( data, x="beta_param[dimensionless]", y="MSR[uV]", qubit=qubit, nqubits=platform.settings["nqubits"], labels=[ "optimal_beta_param", ], ) # drag pulse RX(pi/2) RX90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=0, beta=beta_param ) # drag pulse RY(pi) RY_drag_pulse = platform.create_RX_drag_pulse( qubit, start=RX90_drag_pulse.finish, relative_phase=+np.pi / 2, beta=beta_param, ) # RO pulse ro_pulse = platform.create_qubit_readout_pulse( qubit, start=RY_drag_pulse.finish ) # Rx(pi/2) - Ry(pi) - Ro seq1 = PulseSequence() seq1.add(RX90_drag_pulse) seq1.add(RY_drag_pulse) seq1.add(ro_pulse) msr1, i1, q1, phase1 = platform.execute_pulse_sequence(seq1)[ro_pulse.serial] # drag pulse RY(pi/2) RY90_drag_pulse = platform.create_RX90_drag_pulse( qubit, start=0, relative_phase=np.pi / 2, beta=beta_param ) # drag pulse RX(pi) RX_drag_pulse = platform.create_RX_drag_pulse( qubit, start=RY90_drag_pulse.finish, beta=beta_param ) # Ry(pi/2) - Rx(pi) - Ro seq2 = PulseSequence() seq2.add(RY90_drag_pulse) seq2.add(RX_drag_pulse) seq2.add(ro_pulse) 
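            # seq1 (RX(pi/2) - RY(pi)) and seq2 (RY(pi/2) - RX(pi)) form the usual
            # DRAG-calibration pair: at the optimal beta both sequences should yield
            # the same readout signal, so the stored difference msr1 - msr2 is
            # expected to cross zero there, which drag_tunning_fit extracts from the
            # scan. (Inferred rationale; the code itself only stores the difference.)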
msr2, phase2, i2, q2 = platform.execute_pulse_sequence(seq2)[ro_pulse.serial] results = { "MSR[V]": msr1 - msr2, "i[V]": i1 - i2, "q[V]": q1 - q2, "phase[deg]": phase1 - phase2, "beta_param[dimensionless]": beta_param, } data.add(results) count += 1 yield data def _get_sequence_from_gate_pair(platform: AbstractPlatform, gates, qubit, beta_param): pulse_duration = platform.settings["native_gates"]["single_qubit"][qubit]["RX"][ "duration" ] # All gates have equal pulse duration sequence = PulseSequence() sequenceDuration = 0 pulse_start = 0 for gate in gates: if gate == "I": # print("Transforming to sequence I gate") pass if gate == "RX(pi)": # print("Transforming to sequence RX(pi) gate") if beta_param == None: RX_pulse = platform.create_RX_pulse( qubit, start=pulse_start, ) else: RX_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX_pulse) if gate == "RX(pi/2)": # print("Transforming to sequence RX(pi/2) gate") if beta_param == None: RX90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, ) else: RX90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, beta=beta_param, ) sequence.add(RX90_pulse) if gate == "RY(pi)": # print("Transforming to sequence RY(pi) gate") if beta_param == None: RY_pulse = platform.create_RX_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY_pulse = platform.create_RX_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY_pulse) if gate == "RY(pi/2)": # print("Transforming to sequence RY(pi/2) gate") if beta_param == None: RY90_pulse = platform.create_RX90_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, ) else: RY90_pulse = platform.create_RX90_drag_pulse( qubit, start=pulse_start, relative_phase=np.pi / 2, beta=beta_param, ) sequence.add(RY90_pulse) sequenceDuration = sequenceDuration + pulse_duration pulse_start = pulse_duration # RO pulse starting just after pair of gates ro_pulse = platform.create_qubit_readout_pulse(qubit, start=sequenceDuration + 4) return sequence, ro_pulse src/qibocal/calibrations/characterization/__init__.py METASEP src/qibocal/decorators.py METASEP """Decorators implementation.""" import os from qibocal.config import raise_error def plot(header, method): """Decorator for adding plots in the report and live plotting page. Args: header (str): Header of the plot to use in the report. method (Callable): Plotting method defined under ``qibocal.plots``. """ def wrapped(f): if hasattr(f, "plots"): # insert in the beginning of the list to have # proper plot ordering in the report f.plots.insert(0, (header, method)) else: f.plots = [(header, method)] return f return wrapped src/qibocal/data.py METASEP """Implementation of DataUnits and Data class to store calibration routines outputs.""" import re from abc import abstractmethod import numpy as np import pandas as pd import pint_pandas from qibocal.config import raise_error class AbstractData: def __init__(self, name=None): if name is None: self.name = "data" else: self.name = name self.df = pd.DataFrame() self.quantities = None def __add__(self, data): self.df = pd.concat([self.df, data.df], ignore_index=True) return self @abstractmethod def add(self, data): raise_error(NotImplementedError) def __len__(self): """Computes the length of the data.""" return len(self.df) @classmethod def load_data(cls, folder, routine, format, name): raise_error(NotImplementedError) @abstractmethod def to_csv(self, path): """Save data in csv file. 
Args: path (str): Path containing output folder.""" if self.quantities == None: self.df.to_csv(f"{path}/{self.name}.csv") else: self.df.pint.dequantify().to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. Args: path (str): Path containing output folder.""" self.df.to_pickle(f"{path}/{self.name}.pkl") class DataUnits(AbstractData): """Class to store the data measured during the calibration routines. It is a wrapper to a pandas DataFrame with units of measure from the Pint library. Args: quantities (dict): dictionary containing additional quantities that the user may save other than the pulse sequence output. The keys are the name of the quantities and the corresponding values are the units of measure. options (list): list containing additional values to be saved. """ def __init__(self, name=None, quantities=None, options=None): super().__init__(name=name) self._df = pd.DataFrame( { "MSR": pd.Series(dtype="pint[V]"), "i": pd.Series(dtype="pint[V]"), "q": pd.Series(dtype="pint[V]"), "phase": pd.Series(dtype="pint[deg]"), } ) self.quantities = {"MSR": "V", "i": "V", "q": "V", "phase": "rad"} self.options = [] if quantities is not None: self.quantities.update(quantities) for name, unit in quantities.items(): self.df.insert(0, name, pd.Series(dtype=f"pint[{unit}]")) if options is not None: self.options = options for option in options: self.df.insert( # pylint: disable=E1101 0, option, pd.Series(dtype=object) ) from pint import UnitRegistry self.ureg = UnitRegistry() @property def df(self): return self._df @df.setter def df(self, df): """Set df attribute. Args: df (pd.DataFrame): pandas DataFrame. Every key should have the following form: ``<name>[<unit>]``. """ if isinstance(df, pd.DataFrame): self._df = df else: raise_error(TypeError, f"{df.type} is not a pd.DataFrame.") def load_data_from_dict(self, data: dict): """Set df attribute. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ processed_data = {} for key, values in data.items(): if "[" in key: name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) processed_data[name] = pd.Series( data=(np.array(values) * self.ureg(unit)), dtype=f"pint[{unit}]" ) else: processed_data[key] = pd.Series(data=(values), dtype=object) self._df = pd.DataFrame(processed_data) def add(self, data): """Add a row to `DataUnits`. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): if "[" in key: name = key.split("[")[0] unit = re.search(r"\[([A-Za-z0-9_]+)\]", key).group(1) # TODO: find a better way to do this self.df.loc[l, name] = np.array(value) * self.ureg(unit) else: self.df.loc[l, key] = value def get_values(self, key, unit=None): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. unit (str): Unit of the returned values. Returns: ``pd.Series`` with the quantity values in the given units. """ if unit is None: return self.df[key] else: return self.df[key].pint.to(unit).pint.magnitude @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: data (``DataUnits``): dataset object with the loaded data. 
""" obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file, header=[0, 1]) obj.df.pop("Unnamed: 0_level_0") quantities_label = [] obj.options = [] for column in obj.df.columns: # pylint: disable=E1101 if "Unnamed" not in column[1]: quantities_label.append(column[0]) else: obj.options.append(column[0]) quantities_df = obj.df[quantities_label].pint.quantify() options_df = obj.df[obj.options] options_df.columns = options_df.columns.droplevel(1) obj.df = pd.concat([quantities_df, options_df], axis=1) elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" data = self.df[list(self.quantities)].pint.dequantify() firsts = data.index.get_level_values(None) data[self.options] = self.df[self.options].loc[firsts].values data.to_csv(f"{path}/{self.name}.csv") class Data(AbstractData): """Class to store the data obtained from calibration routines. It is a wrapper to a pandas DataFrame. Args: quantities (dict): dictionary quantities to be saved. """ def __init__(self, name=None, quantities=None): super().__init__(name=name) if quantities is not None: self.quantities = quantities for name in quantities: self.df.insert(0, name, pd.Series(dtype=object)) @property def df(self): return self._df @df.setter def df(self, data): """Set df attribute. Args: df (pd.DataFrame): """ if isinstance(data, pd.DataFrame): self._df = data def load_data_from_dict(self, data: dict): """Set df attribute. Args: df (dict): dictionary containing the data to be added. """ processed_data = {} for key, values in data.items(): processed_data[key] = pd.Series(data=(values), dtype=object) self._df = pd.DataFrame(processed_data) def add(self, data): """Add a row to data. Args: data (dict): dictionary containing the data to be added. Every key should have the following form: ``<name>[<unit>]``. """ l = len(self) for key, value in data.items(): self.df.loc[l, key] = value def get_values(self, quantity): """Get values of a quantity in specified units. Args: quantity (str): Quantity to get the values of. Returns: ``pd.Series`` with the quantity values in the given units. """ return self.df[quantity].values @classmethod def load_data(cls, folder, routine, format, name): """Load data from specific format. Args: folder (path): path to the output folder from which the data will be loaded routine (str): calibration routine data to be loaded format (str): data format. Possible choices are 'csv' and 'pickle'. Returns: data (``Data``): data object with the loaded data. """ obj = cls() if format == "csv": file = f"{folder}/data/{routine}/{name}.csv" obj.df = pd.read_csv(file) obj.df.pop("Unnamed: 0") elif format == "pickle": file = f"{folder}/data/{routine}/{name}.pkl" obj.df = pd.read_pickle(file) else: raise_error(ValueError, f"Cannot load data using {format} format.") return obj def to_csv(self, path): """Save data in csv file. Args: path (str): Path containing output folder.""" self.df.to_csv(f"{path}/{self.name}.csv") def to_pickle(self, path): """Save data in pickel file. 
    Args:
        path (str): Path containing output folder."""
        self.df.to_pickle(f"{path}/{self.name}.pkl")
src/qibocal/config.py METASEP
"""Custom logger implementation."""
import logging
import os

# Logging levels available here https://docs.python.org/3/library/logging.html#logging-levels
QIBOCAL_LOG_LEVEL = 30
if "QIBOCAL_LOG_LEVEL" in os.environ:  # pragma: no cover
    QIBOCAL_LOG_LEVEL = 10 * int(os.environ.get("QIBOCAL_LOG_LEVEL"))


def raise_error(exception, message=None, args=None):
    """Raise exception with logging error.

    Args:
        exception (Exception): python exception.
        message (str): the error message.
    """
    log.error(message)
    if args:
        raise exception(message, args)
    else:
        raise exception(message)


# Configuration for logging mechanism
class CustomHandler(logging.StreamHandler):
    """Custom handler for logging algorithm."""

    def format(self, record):
        """Format the record with specific format."""
        from qibocal import __version__

        fmt = f"[Qibocal {__version__}|%(levelname)s|%(asctime)s]: %(message)s"
        grey = "\x1b[38;20m"
        green = "\x1b[92m"
        yellow = "\x1b[33;20m"
        red = "\x1b[31;20m"
        bold_red = "\x1b[31;1m"
        reset = "\x1b[0m"
        self.FORMATS = {
            logging.DEBUG: green + fmt + reset,
            logging.INFO: grey + fmt + reset,
            logging.WARNING: yellow + fmt + reset,
            logging.ERROR: red + fmt + reset,
            logging.CRITICAL: bold_red + fmt + reset,
        }
        log_fmt = self.FORMATS.get(record.levelno)
        return logging.Formatter(log_fmt, datefmt="%Y-%m-%d %H:%M:%S").format(record)


# allocate logger object
log = logging.getLogger(__name__)
log.setLevel(QIBOCAL_LOG_LEVEL)
log.addHandler(CustomHandler())
src/qibocal/__init__.py METASEP
from .cli import command, live_plot, upload

"""qibocal: Quantum Calibration Verification and Validation using Qibo."""
import importlib.metadata as im

__version__ = im.version(__package__)
src/qibocal/calibrations/__init__.py METASEP
from qibocal.calibrations.characterization.allXY import *
from qibocal.calibrations.characterization.calibrate_qubit_states import *
from qibocal.calibrations.characterization.flipping import *
from qibocal.calibrations.characterization.qubit_spectroscopy import *
from qibocal.calibrations.characterization.rabi_oscillations import *
from qibocal.calibrations.characterization.ramsey import *
from qibocal.calibrations.characterization.resonator_spectroscopy import *
from qibocal.calibrations.characterization.t1 import *
from qibocal.calibrations.protocols.test import *
src/qibocal/calibrations/protocols/test.py METASEP
from qibo import gates, models

from qibocal.data import Data


def test(
    platform,
    qubit: list,
    nshots,
    points=1,
):
    data = Data("test", quantities=["nshots", "probabilities"])
    nqubits = len(qubit)
    circuit = models.Circuit(nqubits)
    circuit.add(gates.H(qubit[0]))
    circuit.add(gates.H(qubit[1]))
    # circuit.add(gates.H(1))
    circuit.add(gates.M(*qubit))
    execution = circuit(nshots=nshots)
    data.add({"nshots": nshots, "probabilities": execution.probabilities()})
    yield data
src/qibocal/calibrations/protocols/__init__.py METASEP
src/qibocal/calibrations/protocols/abstract.py METASEP
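# Illustrative usage sketch (assumed, with made-up values) of the DataUnits container
# from src/qibocal/data.py, which the calibration routines above use to record their
# results: extra quantities are declared with their units, rows are added with
# "<name>[<unit>]" keys, and values are read back in any compatible unit via pint.
from qibocal.data import DataUnits

data = DataUnits(name="data_q0", quantities={"frequency": "Hz"})
data.add(
    {
        "MSR[V]": 1.2e-6,
        "i[V]": 0.8e-6,
        "q[V]": 0.9e-6,
        "phase[rad]": 0.1,
        "frequency[Hz]": 7.1e9,
    }
)
msr_uv = data.get_values("MSR", "uV")  # magnitudes converted to microvolt
freqs_ghz = data.get_values("frequency", "GHz")  # magnitudes converted to GHz
# The pint-backed DataFrame is what lets the fitting helpers request, e.g.,
# MSR in uV and frequency in GHz, as done in the lorentzian_fit / ramsey_fit
# calls above.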
[ { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n 
self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def 
build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute 
``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. 
Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n 
``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot 
results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n 
ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)\n return np.array(grouped_df.index), np.array(grouped_df.values.tolist())\n \n def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):\n myfigs = []\n popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)\n fig = go.Scatter(\n x=xdata_scatter,\n y=ydata_scatter,\n line=dict(color=\"#6597aa\"),\n mode=\"markers\",\n marker={\"opacity\": 0.2, \"symbol\": \"square\"},\n name=\"runs\",\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=xdata, y=ydata, line=dict(color=\"#aa6464\"), mode=\"markers\", name=\"average\"\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=x_fit,\n y=y_fit,\n name=\"A: {:.3f}, p: {:.3f}, B: {:.3f}\".format(popt[0], popt[1], popt[2]),\n line=go.scatter.Line(dash=\"dot\"),\n )\n myfigs.append(fig)\n self.all_figures.append({'figs' : myfigs})\n\n def report(self):\n from plotly.subplots import make_subplots\n\n l = len(self.all_figures)\n subplot_titles = [figdict.get('subplot_title') for figdict in self.all_figures]\n fig = make_subplots(\n rows=l, cols=1 if len == 1 else 2,\n subplot_titles = subplot_titles)\n for count, fig_dict in enumerate(self.all_figures):\n plot_list = fig_dict['figs']\n for plot in plot_list:\n fig.add_trace(plot, row=count//2 + 1, col = count%2+1)\n fig.update_xaxes(title_font_size=18, tickfont_size=16)\n fig.update_yaxes(title_font_size=18, 
tickfont_size=16)\n fig.update_layout(\n font_family=\"Averta\",\n hoverlabel_font_family=\"Averta\",\n title_text=\"Report\",\n hoverlabel_font_size=16,\n showlegend=True,\n height=500 * int(l/2),\n width=1000,\n )\n return fig\n\n\ndef embed_unitary_circuit(circuit: Circuit, nqubits: int, support: list) -> Circuit:\n \"\"\"Takes a circuit and redistributes the gates to the support of\n a new circuit with ``nqubits`` qubits.\n\n Args:\n circuit (Circuit): The circuit with len(``support``) many qubits.\n nqubits (int): Qubits of new circuit.\n support (list): The qubits were the gates should be places.\n\n Returns:\n Circuit: Circuit with redistributed gates.\n \"\"\"\n\n idxmap = np.vectorize(lambda idx: support[idx])\n newcircuit = Circuit(nqubits)\n for gate in circuit.queue:", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many 
different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n 
with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:", "type": "infile" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def 
__iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are 
this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, 
from __future__ import annotations

import pickle
from collections.abc import Iterable
from copy import deepcopy
from itertools import product
from os.path import isfile

import numpy as np
import pandas as pd
import plotly.graph_objects as go
from qibo import gates
from qibo.models import Circuit
from qibo.noise import NoiseModel

from qibocal.calibrations.protocols.utils import (
    ONEQUBIT_CLIFFORD_PARAMS,
    experiment_directory,
)


class Circuitfactory:
    """TODO write documentation
    TODO make the embedding into larger qubit space possible"""

    def __init__(
        self, nqubits: int, depths: list, runs: int, qubits: list = None
    ) -> None:
        self.nqubits = nqubits if nqubits is not None else len(qubits)
        self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]
        self.depths = depths
        self.runs = runs

    def __len__(self):
        return self.runs * len(self.depths)

    def __iter__(self) -> Circuitfactory:
        self.n = 0
        return self

    def __next__(self) -> Circuit:
        if self.n >= self.runs * len(self.depths):
            raise StopIteration
        else:
            circuit = self.build_circuit(self.depths[self.n % len(self.depths)])
            self.n += 1
            # Distribute the circuit onto the given support.
            bigcircuit = Circuit(self.nqubits)
            bigcircuit.add(circuit.on_qubits(*self.qubits))
            return bigcircuit

    def build_circuit(self, depth: int):
        raise NotImplementedError


class SingleCliffordsFactory(Circuitfactory):
    def __init__(
        self, nqubits: int, depths: list, runs: int, qubits: list = None
    ) -> None:
        super().__init__(nqubits, depths, runs, qubits)

    def build_circuit(self, depth: int):
        circuit = Circuit(len(self.qubits))
        for _ in range(depth):
            circuit.add(self.gates())
        circuit.add(gates.M(*range(len(self.qubits))))
        return circuit

    def clifford_unitary(
        self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0
    ) -> np.ndarray:
        """Four given parameters are used to build one Clifford unitary.

        Args:
            theta (float): An angle.
            nx (float): Prefactor.
            ny (float): Prefactor.
            nz (float): Prefactor.

        Returns:
            np.ndarray: The drawn 2x2 unitary matrix.
        """
        matrix = np.array(
            [
                [
                    np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),
                    -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),
                ],
                [
                    ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),
                    np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),
                ],
            ]
        )
        return matrix

    def gates(self) -> list[gates.Unitary]:
        """Draws the parameters and builds the unitary Clifford gates for
        a circuit layer.

        Returns:
            list filled with ``qibo.gates.Unitary``:
            the simultaneous Clifford gates.
        """
        # There are this many different Clifford matrices.
        amount = len(ONEQUBIT_CLIFFORD_PARAMS)
        gates_list = []
        # Choose as many random integers between 0 and 23 as there are used
        # qubits. Get the Clifford parameters and build the unitaries.
        for count, rint in enumerate(
            np.random.randint(0, amount, size=len(self.qubits))
        ):
            # Build the random Clifford matrix and append it as a gate on qubit `count`.
            gates_list.append(
                gates.Unitary(
                    self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count
                )
            )
        return gates_list
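

# Minimal usage sketch (illustrative, not part of the original module): iterating
# over a factory yields one circuit per (run, depth) pair, already embedded on the
# chosen qubit support. The depths, runs and qubit values below are made up.
#
#     factory = SingleCliffordsFactory(nqubits=2, depths=[1, 3, 5], runs=2, qubits=[0, 1])
#     circuits = list(factory)          # runs * len(depths) = 6 circuits
#     print(len(circuits), circuits[0].nqubits)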


class Experiment:
    """Experiment object which holds an iterable circuit factory along with
    a simple data structure associated to each circuit.

    Args:
        circuitfactory (Iterable): Gives a certain amount of circuits when
            iterated over.
        data (list): If filled, ``data`` can be used to specify parameters
            while executing a circuit or deciding how to process results.
        nshots (int): For execution of circuit, indicates how many shots.
    """

    def __init__(
        self,
        circuitfactory: Iterable,
        nshots: int = None,
        data: list = None,
        noisemodel: NoiseModel = None,
    ) -> None:
        self.circuitfactory = circuitfactory
        self.nshots = nshots
        self.data = data
        self.__noise_model = noisemodel

    @classmethod
    def load(cls, path: str) -> Experiment:
        """Creates an object with data and, if possible, with circuits.

        Args:
            path (str): The directory from where the object should be restored.

        Returns:
            Experiment: The object with data (and circuitfactory).
        """
        datapath = f"{path}data.pkl"
        circuitspath = f"{path}circuits.pkl"
        if isfile(datapath):
            with open(datapath, "rb") as f:
                data = pickle.load(f)
            if isinstance(data, pd.DataFrame):
                data = data.to_dict("records")
            nshots = len(data[0]["samples"])
        else:
            data = None
            nshots = None
        if isfile(circuitspath):
            with open(circuitspath, "rb") as f:
                circuitfactory = pickle.load(f)
        else:
            circuitfactory = None
        # Initiate an instance of the experiment class.
        obj = cls(circuitfactory, data=data, nshots=nshots)
        return obj

    def prebuild(self) -> None:
        """Converts the attribute ``circuitfactory``, which is in general
        an iterable, into a list.
        """
        self.circuitfactory = list(self.circuitfactory)

    def execute(self) -> None:
        """Calls method ``single_task`` while iterating over attribute
        ``circuitfactory``.

        Collects data given the already set data and overwrites
        attribute ``data``.
        """
        if self.circuitfactory is None:
            raise NotImplementedError("There are no circuits to execute.")
        newdata = []
        for circuit in self.circuitfactory:
            try:
                datarow = next(self.data)
            except TypeError:
                datarow = {}
            newdata.append(self.single_task(deepcopy(circuit), datarow))
        self.data = newdata

    def single_task(self, circuit: Circuit, datarow: dict) -> dict:
        """Executes a circuit, returns the single shot results.

        Args:
            circuit (Circuit): Will be executed, has to return samples.
            datarow (dict): Dictionary with parameters for execution and
                immediate postprocessing information.
        """
        if self.__noise_model is not None:
            circuit = self.__noise_model.apply(circuit)
        samples = circuit(nshots=self.nshots).samples()
        return {"samples": samples}

    def save(self) -> None:
        """Creates a path and pickles relevant data from ``self.data`` and,
        if ``self.circuitfactory`` is a list, that one too.
        """
        self.path = experiment_directory("standardrb")
        if isinstance(self.circuitfactory, list):
            with open(f"{self.path}circuits.pkl", "wb") as f:
                pickle.dump(self.circuitfactory, f)
        with open(f"{self.path}data.pkl", "wb") as f:
            pickle.dump(self.data, f)

    @property
    def dataframe(self) -> pd.DataFrame:
        return pd.DataFrame(self.data)

    def _append_data(self, name: str, datacolumn: list) -> None:
        """Adds a data column to the ``data`` attribute.

        Args:
            name (str): Name of data column.
            datacolumn (list): A list of the right length.
        """
        if len(datacolumn) != len(self.data):
            raise ValueError("Given data column doesn't have the right length.")
        df = self.dataframe
        df[name] = datacolumn
        self.data = df.to_dict("records")

    @property
    def samples(self) -> np.ndarray:
        """Returns the samples from ``self.data`` as an array.

        Returns:
            np.ndarray: Array of samples.
        """
        try:
            return np.array(self.dataframe["samples"].tolist())
        except KeyError:
            print("No samples here. Execute experiment first.")
            return None

    @property
    def probabilities(self) -> np.ndarray:
        """Takes the stored samples and returns probabilities for each
        possible state to occur.

        Returns:
            np.ndarray: 2-dimensional probability array.
        """
        allsamples = self.samples
        if allsamples is None:
            print("No probabilities either.")
            return None
        # Create all possible state vectors.
        allstates = list(product([0, 1], repeat=len(allsamples[0][0])))
        # Iterate over all the samples and count the different states.
        probs = [
            [np.sum(np.prod(samples == state, axis=1)) for state in allstates]
            for samples in allsamples
        ]
        probs = np.array(probs) / self.nshots
        return probs

    def apply_task(self, gtask):
        self = gtask(self)
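

# Minimal end-to-end sketch (illustrative values, not from the source): build a
# factory, wrap it in an Experiment, execute it and inspect the per-circuit state
# probabilities.
#
#     factory = SingleCliffordsFactory(nqubits=1, depths=[1, 5, 10], runs=3)
#     experiment = Experiment(factory, nshots=128)
#     experiment.prebuild()       # optional: freeze the circuits in a list
#     experiment.execute()        # fills experiment.data with sample dictionaries
#     print(experiment.probabilities.shape)   # (runs * len(depths), 2**nqubits)
#     experiment.save()           # pickles circuits and data into a new directory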


class Result:
    """Once initialized with the correct parameters, a Result object can build
    reports to display the results of a randomized benchmarking experiment.
    """

    def __init__(self, dataframe: pd.DataFrame) -> None:
        self.df = dataframe
        self.all_figures = []

    def extract(self, group_by: str, output: str, agg_type: str):
        """Aggregates the dataframe: groups the frame by one column and applies
        the given aggregation to another column.

        Args:
            group_by (str): Name of the column to group by.
            output (str): Name of the column which gets aggregated.
            agg_type (str): Aggregation function (e.g. ``"mean"``) applied to
                each group.

        Returns:
            tuple: The group keys and the aggregated values as arrays.
        """
        grouped_df = self.df.groupby(group_by)[output].apply(agg_type)
        return np.array(grouped_df.index), np.array(grouped_df.values.tolist())

    def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):
        myfigs = []
        popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)
        fig = go.Scatter(
            x=xdata_scatter,
            y=ydata_scatter,
            line=dict(color="#6597aa"),
            mode="markers",
            marker={"opacity": 0.2, "symbol": "square"},
            name="runs",
        )
        myfigs.append(fig)
        fig = go.Scatter(
            x=xdata, y=ydata, line=dict(color="#aa6464"), mode="markers", name="average"
        )
        myfigs.append(fig)
        fig = go.Scatter(
            x=x_fit,
            y=y_fit,
            name="A: {:.3f}, p: {:.3f}, B: {:.3f}".format(popt[0], popt[1], popt[2]),
            line=go.scatter.Line(dash="dot"),
        )
        myfigs.append(fig)
        self.all_figures.append({"figs": myfigs})

    def report(self):
        from plotly.subplots import make_subplots

        l = len(self.all_figures)
        subplot_titles = [figdict.get("subplot_title") for figdict in self.all_figures]
        fig = make_subplots(
            rows=l, cols=1 if l == 1 else 2, subplot_titles=subplot_titles
        )
        for count, fig_dict in enumerate(self.all_figures):
            plot_list = fig_dict["figs"]
            for plot in plot_list:
                fig.add_trace(plot, row=count // 2 + 1, col=count % 2 + 1)
        fig.update_xaxes(title_font_size=18, tickfont_size=16)
        fig.update_yaxes(title_font_size=18, tickfont_size=16)
        fig.update_layout(
            font_family="Averta",
            hoverlabel_font_family="Averta",
            title_text="Report",
            hoverlabel_font_size=16,
            showlegend=True,
            height=500 * int(l / 2),
            width=1000,
        )
        return fig
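

# ``scatter_fit_fig`` above relies on ``self.fitting_func(xdata, ydata)`` returning
# ``popt, pcov, x_fit, y_fit``, but no such method is defined in this module.
# The sketch below is one possible implementation (an assumption, not the original
# code): a subclass fitting the standard RB exponential decay A * p**m + B with
# ``scipy.optimize.curve_fit``. The name ``DecayResult`` and the initial guess
# ``p0`` are illustrative only.


class DecayResult(Result):
    """Sketch of a ``Result`` subclass providing the expected fitting interface."""

    def fitting_func(self, xdata, ydata):
        from scipy.optimize import curve_fit

        def decay(m, A, p, B):
            # Standard randomized-benchmarking decay model.
            return A * p**m + B

        # Fit the averaged data and build a fine grid for plotting the fitted curve.
        popt, pcov = curve_fit(decay, xdata, ydata, p0=[0.5, 0.9, 0.5], maxfev=2000)
        x_fit = np.linspace(np.min(xdata), np.max(xdata), 100)
        return popt, pcov, x_fit, decay(x_fit, *popt)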
tickfont_size=16)\n fig.update_layout(\n font_family=\"Averta\",\n hoverlabel_font_family=\"Averta\",\n title_text=\"Report\",\n hoverlabel_font_size=16,\n showlegend=True,\n height=500 * int(l/2),\n width=1000,\n )\n return fig\n\n\ndef embed_unitary_circuit(circuit: Circuit, nqubits: int, support: list) -> Circuit:\n \"\"\"Takes a circuit and redistributes the gates to the support of\n a new circuit with ``nqubits`` qubits.\n\n Args:\n circuit (Circuit): The circuit with len(``support``) many qubits.\n nqubits (int): Qubits of new circuit.\n support (list): The qubits were the gates should be places.\n\n Returns:\n Circuit: Circuit with redistributed gates.\n \"\"\"\n\n idxmap = np.vectorize(lambda idx: support[idx])\n newcircuit = Circuit(nqubits)\n for gate in circuit.queue:\n if not isinstance(gate, gates.measurements.M):", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n 
experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
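The ``clifford_unitary`` method shown above builds a 2x2 matrix from an angle ``theta`` and an axis ``(nx, ny, nz)``. The sketch below reproduces that formula and checks a simple case, assuming the axis is normalized (as the entries of ``ONEQUBIT_CLIFFORD_PARAMS`` presumably guarantee); the function name ``axis_angle_unitary`` is illustrative.

import numpy as np


def axis_angle_unitary(theta: float, nx: float, ny: float, nz: float) -> np.ndarray:
    # Same parametrization as the serialized ``clifford_unitary`` method:
    # a rotation by ``theta`` about the (assumed normalized) axis (nx, ny, nz).
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    return np.array(
        [
            [c - 1j * nz * s, -ny * s - 1j * nx * s],
            [ny * s - 1j * nx * s, c + 1j * nz * s],
        ]
    )


# Sanity check: theta = pi about the z axis is a Pauli-Z up to a global phase.
u = axis_angle_unitary(np.pi, 0.0, 0.0, 1.0)
assert np.allclose(u.conj().T @ u, np.eye(2))             # unitary
assert np.allclose(u, -1j * np.array([[1, 0], [0, -1]]))  # equals -i * Z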
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)\n return np.array(grouped_df.index), np.array(grouped_df.values.tolist())\n \n def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):\n myfigs = []\n popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)\n fig = go.Scatter(\n x=xdata_scatter,\n y=ydata_scatter,\n line=dict(color=\"#6597aa\"),\n mode=\"markers\",\n marker={\"opacity\": 0.2, \"symbol\": \"square\"},\n name=\"runs\",\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=xdata, y=ydata, line=dict(color=\"#aa6464\"), mode=\"markers\", name=\"average\"\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=x_fit,\n y=y_fit,\n name=\"A: {:.3f}, p: {:.3f}, B: {:.3f}\".format(popt[0], popt[1], popt[2]),\n line=go.scatter.Line(dash=\"dot\"),\n )\n myfigs.append(fig)\n self.all_figures.append({'figs' : myfigs})\n\n def report(self):\n from plotly.subplots import make_subplots\n\n l = len(self.all_figures)\n subplot_titles = [figdict.get('subplot_title') for figdict in self.all_figures]\n fig = make_subplots(\n rows=l, cols=1 if len == 1 else 2,\n subplot_titles = subplot_titles)\n for count, fig_dict in enumerate(self.all_figures):\n plot_list = fig_dict['figs']\n for plot in plot_list:\n fig.add_trace(plot, row=count//2 + 1, col = count%2+1)\n fig.update_xaxes(title_font_size=18, tickfont_size=16)\n fig.update_yaxes(title_font_size=18, 
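``scatter_fit_fig`` above calls ``self.fitting_func``, which is not shown in this excerpt; its legend label suggests the usual randomized-benchmarking decay A·p^m + B. The following is a hedged sketch of such a fit with ``scipy.optimize.curve_fit``; the model form, helper names, and synthetic data are assumptions, not the project's actual ``fitting_func``.

import numpy as np
from scipy.optimize import curve_fit


def exp_decay(m, a, p, b):
    # Assumed model: the standard RB decay a * p**m + b, inferred from the
    # "A: ..., p: ..., B: ..." legend built in ``scatter_fit_fig``.
    return a * p**m + b


def fit_decay(depths, survival):
    popt, pcov = curve_fit(exp_decay, depths, survival, p0=(0.5, 0.9, 0.5), maxfev=5000)
    x_fit = np.linspace(np.min(depths), np.max(depths), 100)
    return popt, pcov, x_fit, exp_decay(x_fit, *popt)


# Synthetic survival probabilities, for demonstration only.
rng = np.random.default_rng(0)
depths = np.arange(1, 31)
survival = 0.4 * 0.95**depths + 0.5 + rng.normal(0, 0.01, depths.size)
popt, pcov, x_fit, y_fit = fit_decay(depths, survival)
print("A: {:.3f}, p: {:.3f}, B: {:.3f}".format(*popt))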
tickfont_size=16)\n fig.update_layout(\n font_family=\"Averta\",\n hoverlabel_font_family=\"Averta\",\n title_text=\"Report\",\n hoverlabel_font_size=16,\n showlegend=True,\n height=500 * int(l/2),\n width=1000,\n )\n return fig\n\n\ndef embed_unitary_circuit(circuit: Circuit, nqubits: int, support: list) -> Circuit:\n \"\"\"Takes a circuit and redistributes the gates to the support of\n a new circuit with ``nqubits`` qubits.\n\n Args:\n circuit (Circuit): The circuit with len(``support``) many qubits.\n nqubits (int): Qubits of new circuit.\n support (list): The qubits were the gates should be places.\n\n Returns:\n Circuit: Circuit with redistributed gates.\n \"\"\"\n\n idxmap = np.vectorize(lambda idx: support[idx])\n newcircuit = Circuit(nqubits)\n for gate in circuit.queue:\n if not isinstance(gate, gates.measurements.M):\n newcircuit.add(\n gate.__class__(gate.init_args[0], *idxmap(np.array(gate.init_args[1:])))\n )\n else:", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import 
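``embed_unitary_circuit`` above remaps each gate's qubit indices onto a chosen support by hand, while the factory's ``__next__`` reaches the same result through qibo's ``Circuit.on_qubits``. A short sketch of that built-in route follows; the identity unitaries and qubit choices are placeholders, not values used by the project.

import numpy as np
from qibo import gates
from qibo.models import Circuit

# A two-qubit circuit defined on "local" qubits 0 and 1 (identity unitaries
# stand in for the random Cliffords a factory would draw).
small = Circuit(2)
small.add(gates.Unitary(np.eye(2), 0))
small.add(gates.Unitary(np.eye(2), 1))

# Place it on qubits 1 and 3 of a five-qubit register; this is the built-in
# counterpart of the manual index remapping done in ``embed_unitary_circuit``.
big = Circuit(5)
big.add(small.on_qubits(1, 3))
print(len(big.queue))  # 2 gates, now acting on qubits 1 and 3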
product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:", "type": "inproject" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n 
circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise 
NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. 
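``Experiment.save`` and ``Experiment.load`` above amount to a plain pickle round trip keyed on ``data.pkl``/``circuits.pkl``, with ``load`` recovering ``nshots`` from the length of the first sample list. A minimal sketch of that round trip, assuming only that the path ends with a separator as the f-strings imply; the ``/tmp`` directory and toy data are illustrative.

import os
import pickle
from os.path import isfile

# ``path`` is assumed to end with a separator, matching the f-strings
# f"{path}data.pkl" / f"{path}circuits.pkl" used by ``Experiment``.
path = "/tmp/standardrb_demo/"
os.makedirs(path, exist_ok=True)

# Toy stand-in for ``Experiment.data``: one dict per circuit with its samples.
data = [{"samples": [[0], [1], [0]]}, {"samples": [[1], [1], [0]]}]
with open(f"{path}data.pkl", "wb") as f:
    pickle.dump(data, f)

# The loading side mirrors ``Experiment.load``: restore the list and recover
# ``nshots`` from the length of the first sample set.
if isfile(f"{path}data.pkl"):
    with open(f"{path}data.pkl", "rb") as f:
        restored = pickle.load(f)
    nshots = len(restored[0]["samples"])
    print(nshots)  # 3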
Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)\n return np.array(grouped_df.index), np.array(grouped_df.values.tolist())\n \n def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):\n myfigs = []\n popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)\n fig = go.Scatter(\n x=xdata_scatter,\n y=ydata_scatter,\n line=dict(color=\"#6597aa\"),\n mode=\"markers\",\n marker={\"opacity\": 0.2, \"symbol\": \"square\"},\n name=\"runs\",\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=xdata, y=ydata, line=dict(color=\"#aa6464\"), mode=\"markers\", name=\"average\"\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=x_fit,\n y=y_fit,\n name=\"A: {:.3f}, p: {:.3f}, B: {:.3f}\".format(popt[0], popt[1], popt[2]),\n line=go.scatter.Line(dash=\"dot\"),\n )\n myfigs.append(fig)\n self.all_figures.append({'figs' : myfigs})\n\n def report(self):\n from plotly.subplots import make_subplots\n\n l = len(self.all_figures)", "type": "common" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the 
circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the 
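``Circuitfactory.__len__`` and ``__next__`` above imply a fixed schedule: ``runs * len(depths)`` circuits, with the depth cycling as ``depths[n % len(depths)]``. A two-line sketch of that ordering (the depth values are arbitrary):

# The factory yields runs * len(depths) circuits, cycling through the depths.
depths, runs = [1, 5, 10], 2
schedule = [depths[n % len(depths)] for n in range(runs * len(depths))]
print(schedule)  # [1, 5, 10, 1, 5, 10]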
experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. 
Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)", "type": "common" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : 
prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:", "type": "commited" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n 
# Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:", "type": "commited" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:", "type": "commited" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n 
circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"", "type": "non_informative" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: 
int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general", "type": "non_informative" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import 
Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):", "type": "non_informative" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise 
StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:", "type": "random" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0", "type": "random" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return 
matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": 
samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)\n return np.array(grouped_df.index), np.array(grouped_df.values.tolist())\n \n def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):\n myfigs = []\n popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)\n fig = go.Scatter(\n x=xdata_scatter,\n y=ydata_scatter,\n line=dict(color=\"#6597aa\"),\n mode=\"markers\",\n marker={\"opacity\": 0.2, \"symbol\": \"square\"},\n name=\"runs\",\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=xdata, y=ydata, line=dict(color=\"#aa6464\"), mode=\"markers\", name=\"average\"\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=x_fit,\n y=y_fit,\n name=\"A: {:.3f}, p: {:.3f}, B: {:.3f}\".format(popt[0], popt[1], popt[2]),\n line=go.scatter.Line(dash=\"dot\"),\n )\n myfigs.append(fig)\n self.all_figures.append({'figs' : myfigs})\n\n def report(self):\n from plotly.subplots import make_subplots\n\n l = len(self.all_figures)\n subplot_titles = 
[figdict.get('subplot_title') for figdict in self.all_figures]\n fig = make_subplots(\n rows=l, cols=1 if len == 1 else 2,\n subplot_titles = subplot_titles)\n for count, fig_dict in enumerate(self.all_figures):", "type": "random" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))\n return circuit\n\n def clifford_unitary(\n self, theta: float = 0, nx: float = 0, ny: float = 0, nz: float = 0\n ) -> np.ndarray:\n \"\"\"Four given parameters are used to build one Clifford unitary.\n\n Args:\n theta (float) : An angle\n nx (float) : prefactor\n ny (float) : prefactor\n nz (float) : prefactor\n\n Returns:\n ``qibo.gates.Unitary`` with the drawn matrix as unitary.\n \"\"\"\n matrix = np.array(\n [\n [\n np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),\n -ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n ],\n [\n ny * np.sin(theta / 2) - 1.0j * nx * np.sin(theta / 2),\n np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),\n ],\n ]\n )\n return matrix\n\n def gates(self) -> list(gates.Unitary):\n \"\"\"Draws the parameters and builds the unitary Clifford gates for\n a circuit layer.\n\n Returns:\n list filled with ``qibo.gates.Unitary``:\n the simulatanous Clifford gates.\n \"\"\"\n # There are this many different Clifford matrices.\n amount = len(ONEQUBIT_CLIFFORD_PARAMS)\n gates_list = []\n # Choose as many random integers between 0 and 23 as there are used\n # qubits. 
Get the clifford parameters and build the unitares.\n for count, rint in enumerate(np.random.randint(0, amount, size=len(self.qubits))):\n # Build the random Clifford matrices append them\n gates_list.append(\n gates.Unitary(\n self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count\n )\n )\n # Make a unitary gate out of 'unitary' for the qubits.\n return gates_list\n \n\nclass Experiment:\n \"\"\"Experiment objects which holds an iterable circuit factory along with\n a simple data structure associated to each circuit.\n\n Args:\n circuitfactory (Iterable): Gives a certain amount of circuits when\n iterated over.\n data (list): If filled ``data`` can be used to specifying parameters\n while executing a circuit or deciding how to process results.\n nshots (int): For execution of circuit, indicates how many shots.\n \"\"\"\n\n def __init__(\n self,\n circuitfactory: Iterable,\n nshots: int = None,\n data: list = None,\n noisemodel: NoiseModel = None,\n ) -> None:\n \"\"\" \"\"\"\n self.circuitfactory = circuitfactory\n self.nshots = nshots\n self.data = data\n self.__noise_model = noisemodel\n\n @classmethod\n def load(cls, path: str) -> Experiment:\n \"\"\"Creates an object with data and if possible with circuits.\n\n Args:\n path (str): The directory from where the object should be restored.\n\n Returns:\n Experiment: The object with data (and circuitfactory).\n \"\"\"\n datapath = f\"{path}data.pkl\"\n circuitspath = f\"{path}circuits.pkl\"\n if isfile(datapath):\n with open(datapath, \"rb\") as f:\n data = pickle.load(f)\n if isinstance(data, pd.DataFrame):\n data = data.to_dict(\"records\")\n nshots = len(data[0][\"samples\"])\n else:\n data = None\n if isfile(circuitspath):\n with open(circuitspath, \"rb\") as f:\n circuitfactory = pickle.load(f)\n else:\n circuitfactory = None\n # Initiate an instance of the experiment class.\n obj = cls(circuitfactory, data=data, nshots=nshots)\n return obj\n\n def prebuild(self) -> None:\n \"\"\"Converts the attribute ``circuitfactory`` which is in general\n an iterable into a list.\n \"\"\"\n self.circuitfactory = list(self.circuitfactory)\n\n def execute(self) -> None:\n \"\"\"Calls method ``single_task`` while iterating over attribute\n ``circuitfactory```.\n\n Collects data given the already set data and overwrites\n attribute ``data``.\n \"\"\"\n if self.circuitfactory is None:\n raise NotImplementedError(\"There are no circuits to execute.\")\n newdata = []\n for circuit in self.circuitfactory:\n try:\n datarow = next(self.data)\n except TypeError:\n datarow = {}\n newdata.append(self.single_task(deepcopy(circuit), datarow))\n self.data = newdata\n\n def single_task(self, circuit: Circuit, datarow: dict) -> None:\n \"\"\"Executes a circuit, returns the single shot results.\n\n Args:\n circuit (Circuit): Will be executed, has to return samples.\n datarow (dict): Dictionary with parameters for execution and\n immediate postprocessing information.\n \"\"\"\n if self.__noise_model is not None:\n circuit = self.__noise_model.apply(circuit)\n samples = circuit(nshots=self.nshots).samples()\n return {\"samples\": samples}\n\n def save(self) -> None:\n \"\"\"Creates a path and pickles relevent data from ``self.data`` and\n if ``self.circuitfactory`` is a list that one too.\n \"\"\"\n self.path = experiment_directory(\"standardrb\")\n if isinstance(self.circuitfactory, list):\n with open(f\"{self.path}circuits.pkl\", \"wb\") as f:\n pickle.dump(self.circuitfactory, f)\n with open(f\"{self.path}data.pkl\", \"wb\") as f:\n pickle.dump(self.data, 
f)\n\n @property\n def dataframe(self) -> pd.DataFrame:\n return pd.DataFrame(self.data)\n\n def _append_data(self, name: str, datacolumn: list) -> None:\n \"\"\"Adds data column to ``data`` attribute.\n\n Args:\n name (str): Name of data column.\n datacolumn (list): A list of the right shape\n \"\"\"\n if len(datacolumn) != len(self.data):\n raise ValueError(\"Given data column doesn't have the right length.\")\n df = self.dataframe\n df[name] = datacolumn\n self.data = df.to_dict(\"records\")\n\n @property\n def samples(self) -> np.ndarray:\n \"\"\"Returns the samples from ``self.data`` in a 2d array.\n\n Returns:\n np.ndarray: 2d array of samples.\n \"\"\"\n\n try:\n return np.array(self.dataframe[\"samples\"].tolist())\n except KeyError:\n print(\"No samples here. Execute experiment first.\")\n return None\n\n @property\n def probabilities(self) -> np.ndarray:\n \"\"\"Takes the stored samples and returns probabilities for each\n possible state to occure.\n\n Returns:\n np.ndarray: Probability array of 2 dimension.\n \"\"\"\n\n allsamples = self.samples\n if allsamples is None:\n print(\"No probabilities either.\")\n return None\n # Create all possible state vectors.\n allstates = list(product([0, 1], repeat=len(allsamples[0][0])))\n # Iterate over all the samples and count the different states.\n probs = [\n [np.sum(np.product(samples == state, axis=1)) for state in allstates]\n for samples in allsamples\n ]\n probs = np.array(probs) / (self.nshots)\n return probs\n\n def apply_task(self, gtask):\n self = gtask(self)\n\n\nclass Result:\n \"\"\"Once initialized with the correct parameters an Result object can build\n reports to display results of an randomized benchmarking experiment.\n \"\"\"\n\n def __init__(self, dataframe: pd.DataFrame) -> None:\n self.df = dataframe\n self.all_figures = []\n\n def extract(self, group_by: str, output: str, agg_type: str):\n \"\"\"Aggregates the dataframe, extracts the data by which the frame was\n grouped, what was calculated given the ``agg_type`` parameters.\n\n Args:\n group_by (str): _description_\n output (str): _description_\n agg_type (str): _description_\n \"\"\"\n grouped_df = self.df.groupby(group_by)[output].apply(agg_type)\n return np.array(grouped_df.index), np.array(grouped_df.values.tolist())\n \n def scatter_fit_fig(self, xdata_scatter, ydata_scatter, xdata, ydata):\n myfigs = []\n popt, pcov, x_fit, y_fit = self.fitting_func(xdata, ydata)\n fig = go.Scatter(\n x=xdata_scatter,\n y=ydata_scatter,\n line=dict(color=\"#6597aa\"),\n mode=\"markers\",\n marker={\"opacity\": 0.2, \"symbol\": \"square\"},\n name=\"runs\",\n )\n myfigs.append(fig)\n fig = go.Scatter(\n x=xdata, y=ydata, line=dict(color=\"#aa6464\"), mode=\"markers\", name=\"average\"\n )", "type": "random" }, { "content": "from __future__ import annotations\n\nimport pickle\nfrom collections.abc import Iterable\nfrom copy import deepcopy\nfrom itertools import product\nfrom os.path import isfile\nimport plotly.graph_objects as go\n\nimport numpy as np\nimport pandas as pd\nfrom qibo import gates\nfrom qibo.models import Circuit\nfrom qibo.noise import NoiseModel\n\nfrom qibocal.calibrations.protocols.utils import (\n ONEQUBIT_CLIFFORD_PARAMS,\n experiment_directory,\n)\n\n\nclass Circuitfactory:\n \"\"\"TODO write documentation\n TODO make the embedding into lager qubit space possible\"\"\"\n\n def __init__(\n self, nqubits: int, depths: list, runs: int, qubits: list = None\n ) -> None:\n self.nqubits = nqubits if nqubits is not None else len(qubits)\n self.qubits = qubits 
if qubits is not None else [x for x in range(nqubits)]\n self.depths = depths\n self.runs = runs\n\n def __len__(self):\n return self.runs * len(self.depths)\n\n def __iter__(self) -> None:\n self.n = 0\n return self\n\n def __next__(self) -> None:\n if self.n >= self.runs * len(self.depths):\n raise StopIteration\n else:\n circuit = self.build_circuit(self.depths[self.n % len(self.depths)])\n self.n += 1\n # Distribute the circuit onto the given support.\n bigcircuit = Circuit(self.nqubits)\n bigcircuit.add(circuit.on_qubits(*self.qubits))\n return bigcircuit\n\n def build_circuit(self, depth: int):\n raise NotImplementedError\n\n\nclass SingleCliffordsFactory(Circuitfactory):\n def __init__(\n self, nqubits: list, depths: list, runs: int, qubits: list = None\n ) -> None:\n super().__init__(nqubits, depths, runs, qubits)\n\n def build_circuit(self, depth: int):\n circuit = Circuit(len(self.qubits))\n for _ in range(depth):\n circuit.add(self.gates())\n circuit.add(gates.M(*range(len(self.qubits))))", "type": "random" } ]
[ " newdata.append(self.single_task(deepcopy(circuit), datarow))", " allsamples = self.samples", " circuit = self.build_circuit(self.depths[self.n % len(self.depths)])", " [np.sum(np.product(samples == state, axis=1)) for state in allstates]", " for samples in allsamples", " samples = circuit(nshots=self.nshots).samples()", " return {\"samples\": samples}", " gates.Unitary(", " self.clifford_unitary(*ONEQUBIT_CLIFFORD_PARAMS[rint]), count", " if not isinstance(gate, gates.measurements.M):", " return np.array(self.dataframe[\"samples\"].tolist())", " circuit.add(self.gates())", " circuit.add(gates.M(*range(len(self.qubits))))", " name=\"A: {:.3f}, p: {:.3f}, B: {:.3f}\".format(popt[0], popt[1], popt[2]),", " grouped_df = self.df.groupby(group_by)[output].apply(agg_type)", " df = self.dataframe", " df[name] = datacolumn", " self.data = df.to_dict(\"records\")", " bigcircuit.add(circuit.on_qubits(*self.qubits))", " for plot in plot_list:", " fig.add_trace(plot, row=count//2 + 1, col = count%2+1)", " newcircuit.add(", " np.cos(theta / 2) - 1.0j * nz * np.sin(theta / 2),", " newcircuit.add(gates.M(*idxmap(np.array(gate.init_args[0:]))))", " np.cos(theta / 2) + 1.0j * nz * np.sin(theta / 2),", " self.df = dataframe", " subplot_titles = [figdict.get('subplot_title') for figdict in self.all_figures]", " return np.array(grouped_df.index), np.array(grouped_df.values.tolist())", " data = pickle.load(f)", " super().__init__(nqubits, depths, runs, qubits)", " circuitfactory = pickle.load(f)", "", " an iterable into a list.", " \"\"\"Draws the parameters and builds the unitary Clifford gates for", " return pd.DataFrame(self.data)", " return self", " plot_list = fig_dict['figs']", " myfigs.append(fig)", " return circuit" ]
METASEP
33
codeforphilly__paws-data-pipeline
codeforphilly__paws-data-pipeline METASEP src/server/api/API_ingest/updated_data.py METASEP from sqlalchemy.orm import sessionmaker from simple_salesforce import Salesforce from config import engine import structlog logger = structlog.get_logger() def get_updated_contact_data(): Session = sessionmaker(engine) qry = """ -- Collect latest foster/volunteer dates with ev_dates as (select person_id, max(case when event_type=1 then time else null end) adopt, max(case when event_type=2 then time else null end) foster_out, -- max(case when event_type=3 then time else null end) rto, max(case when event_type=5 then time else null end) foster_return from sl_animal_events sla left join sl_event_types sle on sle.id = sla.event_type where sle.id in (1,2,5) group by person_id order by person_id ) select json_agg (upd) as "cd" from ( select slsf.source_id as "contactId" , -- long salesforce string slp.id as "personId" , -- short PAWS-local shelterluv id case when (extract(epoch from now())::bigint - foster_out < 365*86400) -- foster out in last year or (extract(epoch from now())::bigint - foster_return < 365*86400) -- foster return then 'Active' else 'Inactive' end as "updatedFosterStatus" , (to_timestamp(foster_out ) at time zone 'America/New_York')::date as "updatedFosterStartDate", (to_timestamp(foster_return ) at time zone 'America/New_York')::date as "updatedFosterEndDate", min(vs.from_date) as "updatedFirstVolunteerDate", max(vs.from_date) as "updatedLastVolunteerDate", vc.source_id as "volgisticsId" from ev_dates left join pdp_contacts slc on slc.source_id = person_id::text and slc.source_type = 'shelterluvpeople' left join pdp_contacts slsf on slsf.matching_id = slc.matching_id and slsf.source_type = 'salesforcecontacts' left join shelterluvpeople slp on slp.internal_id = person_id::text left join pdp_contacts vc on vc.matching_id = slc.matching_id and vc.source_type = 'volgistics' left join volgisticsshifts vs on vs.volg_id::text = vc.source_id where slsf.source_id is not null group by slsf.source_id, slp.id, vc.source_id, foster_out , foster_return ) upd ; """ with Session() as session: result = session.execute(qry) sfdata = result.fetchone()[0] logger.debug("Query for Salesforce update returned %d records", len(sfdata)) return sfdata src/server/api/API_ingest/sl_animal_events.py METASEP import json import os import posixpath as path import structlog logger = structlog.get_logger() import requests from api.API_ingest import shelterluv_db # There are a number of different record types. These are the ones we care about. 
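# Records whose "Type" is not listed here are skipped when get_events_bulk() pages
# through the API. A minimal sketch of that filter (using a hypothetical "page" dict
# holding one decoded response):
#   kept = [ev for ev in page["events"] if ev["Type"] in keep_record_types]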
keep_record_types = [ "Outcome.Adoption", "Outcome.Foster", "Outcome.ReturnToOwner", "Intake.AdoptionReturn", "Intake.FosterReturn" ] # from config import engine # from flask import current_app # from sqlalchemy.sql import text BASE_URL = "http://shelterluv.com/api/" MAX_COUNT = 100 # Max records the API will return for one call # Get the API key try: from secrets_dict import SHELTERLUV_SECRET_TOKEN except ImportError: # Not running locally from os import environ try: SHELTERLUV_SECRET_TOKEN = environ["SHELTERLUV_SECRET_TOKEN"] except KeyError: # Not in environment # You're SOL for now logger.error("Couldn't get SHELTERLUV_SECRET_TOKEN from file or environment") TEST_MODE=os.getenv("TEST_MODE") # if not present, has value None headers = {"Accept": "application/json", "X-API-Key": SHELTERLUV_SECRET_TOKEN} # Sample response from events request: # { # "success": 1, # "events": [ # { # "Type": "Outcome.Adoption", # "Subtype": "PAC", # "Time": "1656536900", # "User": "phlp_mxxxx", # "AssociatedRecords": [ # { # "Type": "Animal", # "Id": "5276xxxx" # }, # { # "Type": "Person", # "Id": "5633xxxx" # } # ] # }, # {...} # ], # "has_more": true, # "total_count": 67467 # } def get_event_count(): """Test that server is operational and get total event count.""" events = "v1/events&offset=0&limit=1" URL = path.join(BASE_URL, events) try: response = requests.request("GET", URL, headers=headers) except Exception as e: logger.error("get_event_count failed with ", e) return -2 if response.status_code != 200: logger.error("get_event_count ", response.status_code, "code") return -3 try: decoded = json.loads(response.text) except json.decoder.JSONDecodeError as e: logger.error("get_event_count JSON decode failed with", e) return -4 if decoded["success"]: return decoded["total_count"] else: logger.error(decoded['error_message']) return -5 # AFAICT, this means URL was bad def get_events_bulk(): """Pull all event records from SL """ # Interesting API design - event record 0 is the newest. But since we pull all records each time it doesn't # really matter which direction we go. Simplest to count up, and we can pull until 'has_more' goes false. # Good news, the API is robust and won't blow up if you request past the end. 
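    # As a rough sketch of the paging below (offsets illustrative), the loop requests
    #   .../v1/events&offset=0&limit=100
    #   .../v1/events&offset=100&limit=100
    #   .../v1/events&offset=200&limit=100
    # and keeps going until the response's 'has_more' flag comes back false.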
# At 100 per request, API returns about 5000 records/minute event_records = [] raw_url = path.join(BASE_URL, "v1/events&offset={0}&limit={1}") offset = 0 limit = MAX_COUNT more_records = True while more_records: url = raw_url.format(offset, limit) try: response = requests.request("GET", url, headers=headers) except Exception as e: logger.error("get_events failed with ", e) return -2 if response.status_code != 200: logger.error("get_event_count ", response.status_code, "code") return -3 try: decoded = json.loads(response.text) except json.decoder.JSONDecodeError as e: logger.error("get_event_count JSON decode failed with", e) return -4 if decoded["success"]: for evrec in decoded["events"]: if evrec["Type"] in keep_record_types: event_records.append(evrec) more_records = decoded["has_more"] # if so, we'll make another pass offset += limit if offset % 1000 == 0: logger.debug("Reading offset %s", str(offset)) if TEST_MODE and offset > 1000: more_records=False # Break out early else: return -5 # AFAICT, this means URL was bad return event_records def slae_test(): total_count = get_event_count() logger.debug("Total events: %d", total_count) b = get_events_bulk() logger.debug("Stored records: %d", len(b)) # f = filter_events(b) # print(f) count = shelterluv_db.insert_events(b) return count def store_all_animals_and_events(): total_count = get_event_count() logger.debug("Total events: %d", total_count) b = get_events_bulk() logger.debug("Stored records: %d", len(b)) # f = filter_events(b) # print(f) count = shelterluv_db.insert_events(b) return count # Query to get last adopt/foster event: # """ # select # person_id as sl_person_id, max(to_timestamp(time)::date) as last_fosteradopt_event # from # sl_animal_events # where event_type < 4 -- check this # group by # person_id # order by # person_id asc; # """ # Volgistics last shift # """ # select # volg_id, max(from_date) as last_shift # from # volgisticsshifts # group by # volg_id # order by # volg_id ; # """ src/server/api/API_ingest/shelterluv_people.py METASEP import requests, os from models import ShelterluvPeople from config import engine from sqlalchemy.orm import sessionmaker import structlog logger = structlog.get_logger() try: from secrets_dict import SHELTERLUV_SECRET_TOKEN except ImportError: # Not running locally logger.debug("Couldn't get SHELTERLUV_SECRET_TOKEN from file, trying environment **********") from os import environ try: SHELTERLUV_SECRET_TOKEN = environ['SHELTERLUV_SECRET_TOKEN'] except KeyError: # Not in environment # You're SOL for now logger.error("Couldn't get SHELTERLUV_SECRET_TOKEN from file or environment") TEST_MODE=os.getenv("TEST_MODE") # if not present, has value None LIMIT = 100 ################################# # This script is used to fetch data from shelterluv API. # Please be mindful of your usage. # example: /people will fetch the data of all people. and send approximately 300 requests. # https://help.shelterluv.com/hc/en-us/articles/115000580127-Shelterluv-API-Overview ################################# ######## Insights ############### # Max result items is 100 - even though it's not specifically specified in the above reference # /people has all the data. 
it seems that /person/:id isn't used ################################# ''' Iterate over all shelterlove people and store in json file in the raw data folder We fetch 100 items in each request, since that is the limit based on our research ''' def store_shelterluv_people_all(): offset = 0 has_more = True Session = sessionmaker(engine) with Session() as session: logger.debug("Truncating table shelterluvpeople") session.execute("TRUNCATE TABLE shelterluvpeople") logger.debug("Start getting shelterluv contacts from people table") while has_more: r = requests.get("http://shelterluv.com/api/v1/people?limit={}&offset={}".format(LIMIT, offset), headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}) response = r.json() for person in response["people"]: #todo: Does this need more "null checks"? session.add(ShelterluvPeople(firstname=person["Firstname"], lastname=person["Lastname"], id=person["ID"] if "ID" in person else None, internal_id=person["Internal-ID"], associated=person["Associated"], street=person["Street"], apartment=person["Apartment"], city=person["City"], state=person["State"], zip=person["Zip"], email=person["Email"], phone=person["Phone"], animal_ids=person["Animal_ids"])) offset += LIMIT has_more = response["has_more"] if not TEST_MODE else response["has_more"] and offset < 1000 if offset % 1000 == 0: logger.debug("Reading offset %s", str(offset)) session.commit() logger.debug("Finished getting shelterluv contacts from people table") return offset src/server/api/API_ingest/shelterluv_db.py METASEP from sqlalchemy import Table, MetaData from sqlalchemy.orm import sessionmaker from config import engine import structlog logger = structlog.get_logger() def insert_animals(animal_list): """Insert animal records into shelterluv_animals table and return row count. """ Session = sessionmaker(engine) session = Session() metadata = MetaData() sla = Table("shelterluv_animals", metadata, autoload=True, autoload_with=engine) # From Shelterluv: ['ID', 'Internal-ID', 'Name', 'Type', 'DOBUnixTime', 'CoverPhoto', 'LastUpdatedUnixTime'] # In db: ['local_id', 'id' (PK), 'name', 'type', 'dob', 'photo', 'update_stamp'] ins_list = [] # Create a list of per-row dicts for rec in animal_list: ins_list.append( { "id": rec["Internal-ID"], "local_id": rec["ID"] if rec["ID"] else 0, # Sometimes there's no local id "name": rec["Name"], "type": rec["Type"], "dob": rec["DOBUnixTime"], "update_stamp": rec["LastUpdatedUnixTime"], "photo": rec["CoverPhoto"], } ) ret = session.execute(sla.insert(ins_list)) session.commit() # Commit all inserted rows session.close() return ret.rowcount def truncate_animals(): """Truncate the shelterluv_animals table""" Session = sessionmaker(engine) session = Session() truncate = "TRUNCATE table shelterluv_animals;" session.execute(truncate) session.commit() # Commit all inserted rows session.close() return 0 def truncate_events(): """Truncate the shelterluv_events table""" Session = sessionmaker(engine) with Session() as session: truncate = "TRUNCATE table sl_animal_events;" session.execute(truncate) session.commit() return 0 def insert_events(event_list): """Insert event records into sl_animal_events table and return row count. 
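    Each record is expected to look roughly like the sample shown in
    sl_animal_events.py, e.g.
    {"Type": "Outcome.Adoption", "Time": "1656536900",
     "AssociatedRecords": [{"Type": "Animal", "Id": ...}, {"Type": "Person", "Id": ...}]},
    and is flattened here into (person_id, animal_id, event_type, time) rows.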
""" # Always a clean insert truncate_events() Session = sessionmaker(engine) with Session() as session: metadata = MetaData() sla = Table("sl_animal_events", metadata, autoload=True, autoload_with=engine) # TODO: Pull from DB - inserted in db_setup/base_users.py/populate_sl_event_types() event_map = { "Outcome.Adoption": 1, "Outcome.Foster": 2, "Outcome.ReturnToOwner": 3, "Intake.AdoptionReturn": 4, "Intake.FosterReturn":5 } # """ INSERT INTO "sl_event_types" ("id","event_name") VALUES # ( 1,'Outcome.Adoption' ), # ( 2,'Outcome.Foster' ), # ( 3,'Outcome.ReturnToOwner' ), # ( 4,'Intake.AdoptionReturn' ), # ( 5,'Intake.FosterReturn' ) """ # Event record: [ AssociatedRecords[Type = Person]["Id"]', # AssociatedRecords[Type = Animal]["Id"]', # "Type", # "Time" # ] # # In db: ['id', # 'person_id', # 'animal_id', # 'event_type', # 'time'] ins_list = [] # Create a list of per-row dicts for rec in event_list: ins_list.append( { "person_id": next( filter(lambda x: x["Type"] == "Person", rec["AssociatedRecords"]) )["Id"], "animal_id": next( filter(lambda x: x["Type"] == "Animal", rec["AssociatedRecords"]) )["Id"], "event_type": event_map[rec["Type"]], "time": rec["Time"], } ) # TODO: Wrap with try/catch ret = session.execute(sla.insert(ins_list)) session.commit() logger.debug("finished inserting events") return ret.rowcount src/server/api/API_ingest/shelterluv_animals.py METASEP import os, time, json import posixpath as path import requests from api.API_ingest import shelterluv_db from server.api.API_ingest.shelterluv_db import insert_animals # from config import engine # from flask import current_app # from sqlalchemy.sql import text BASE_URL = 'http://shelterluv.com/api/' MAX_COUNT = 100 # Max records the API will return for one call try: from secrets_dict import SHELTERLUV_SECRET_TOKEN except ImportError: # Not running locally from os import environ try: SHELTERLUV_SECRET_TOKEN = environ['SHELTERLUV_SECRET_TOKEN'] except KeyError: # Not in environment # You're SOL for now print("Couldn't get SHELTERLUV_SECRET_TOKEN from file or environment") headers = { "Accept": "application/json", "X-API-Key": SHELTERLUV_SECRET_TOKEN } logger = print def get_animal_count(): """Test that server is operational and get total animal count.""" animals = 'v1/animals&offset=0&limit=1' URL = path.join(BASE_URL,animals) try: response = requests.request("GET",URL, headers=headers) except Exception as e: logger('get_animal_count failed with ', e) return -2 if response.status_code != 200: logger("get_animal_count ", response.status_code, "code") return -3 try: decoded = json.loads(response.text) except json.decoder.JSONDecodeError as e: logger("get_animal_count JSON decode failed with", e) return -4 if decoded['success']: return decoded['total_count'] else: return -5 # AFAICT, this means URL was bad def get_updated_animal_count(last_update): """Test that server is operational and get total animal count.""" animals = 'v1/animals&offset=0&limit=1&sort=updated_at&since=' + str(last_update) URL = path.join(BASE_URL,animals) try: response = requests.request("GET",URL, headers=headers) except Exception as e: logger('get_updated_animal_count failed with ', e) return -2 if response.status_code != 200: logger("get_updated_animal_count ", response.status_code, "code") return -3 try: decoded = json.loads(response.text) except json.decoder.JSONDecodeError as e: logger("get_updated_animal_count JSON decode failed with", e) return -4 if decoded['success']: return decoded['total_count'] else: return -5 # AFAICT, this means URL was bad def 
filter_animals(raw_list): """Given a list of animal records as returned by SL, return a list of records with only the fields we care about.""" good_keys = ['ID', 'Internal-ID', 'Name', 'Type', 'DOBUnixTime', 'CoverPhoto','LastUpdatedUnixTime'] filtered = [] for r in raw_list: f = {} for k in good_keys: try: f[k] = r[k] except: if k in ('DOBUnixTime','LastUpdatedUnixTime'): f[k] = 0 else: f[k] = '' filtered.append(f) return filtered def get_animals_bulk(total_count): """Pull all animal records from SL """ # 'Great' API design - animal record 0 is the newest, so we need to start at the end, # back up MAX_COUNT rows, make our request, then keep backing up. We need to keep checking # the total records to ensure one wasn't added in the middle of the process. # Good news, the API is robust and won't blow up if you request past the end. raw_url = path.join(BASE_URL, 'v1/animals&offset={0}&limit={1}') start_record = int(total_count) offset = (start_record - MAX_COUNT) if (start_record - MAX_COUNT) > -1 else 0 limit = MAX_COUNT while offset > -1 : logger("getting at offset", offset) url = raw_url.format(offset,limit) try: response = requests.request("GET",url, headers=headers) except Exception as e: logger('get_animals failed with ', e) return -2 if response.status_code != 200: logger("get_animal_count ", response.status_code, "code") return -3 try: decoded = json.loads(response.text) except json.decoder.JSONDecodeError as e: logger("get_animal_count JSON decode failed with", e) return -4 if decoded['success']: insert_animals( filter_animals(decoded['animals']) ) if offset == 0: break offset -= MAX_COUNT if offset < 0 : limit = limit + offset offset = 0 else: return -5 # AFAICT, this means URL was bad return 'zero' def update_animals(last_update): """Get the animals inserted or updated since last check, insert/update db records. 
""" updated_records = get_updated_animal_count(last_update) def sla_test(): total_count = get_animal_count() print('Total animals:',total_count) b = get_animals_bulk(total_count) print(len(b)) # f = filter_animals(b) # print(f) # count = shelterluv_db.insert_animals(f) return len(b) # if __name__ == '__main__' : # total_count = get_animal_count() # print('Total animals:',total_count) # b = get_animals_bulk(9) # print(len(b)) # f = filter_animals(b) # print(f) # count = shelterluv_db.insert_animals(f) src/server/api/API_ingest/salesforce_contacts.py METASEP import os import structlog from simple_salesforce import Salesforce from sqlalchemy.orm import sessionmaker from config import engine from models import SalesForceContacts logger = structlog.get_logger() TEST_MODE = os.getenv("TEST_MODE") # if not present, has value None def store_contacts_all(): Session = sessionmaker(engine) with Session() as session: logger.debug("truncating table salesforcecontacts") session.execute("TRUNCATE TABLE salesforcecontacts") logger.debug("retrieving the latest salesforce contacts data") if os.path.exists('server/bin/connected-app-secrets.pem'): pem_file = 'server/bin/connected-app-secrets.pem' elif os.path.exists('bin/connected-app-secrets.pem'): pem_file = 'bin/connected-app-secrets.pem' else: logger.error("Missing salesforce jwt private key pem file, skipping data pull") return sf = Salesforce(username=os.getenv('SALESFORCE_USERNAME'), consumer_key=os.getenv('SALESFORCE_CONSUMER_KEY'), privatekey_file=pem_file) results = sf.query("SELECT Contact_ID_18__c, FirstName, LastName, Contact.Account.Name, MailingCountry, MailingStreet, MailingCity, MailingState, MailingPostalCode, Phone, MobilePhone, Email FROM Contact") logger.debug("%d total Salesforce contact records", results['totalSize']) if TEST_MODE: logger.debug("running in test mode so only downloading first page of Salesforce contacts") total_records = 0 done = False while not done: total_records += len(results['records']) logger.debug("Query returned %d Salesforce contact records, total %d", len(results['records']), total_records) for row in results['records']: account_name = row['Account']['Name'] if row['Account'] is not None else None contact = SalesForceContacts(contact_id=row['Contact_ID_18__c'], first_name=row['FirstName'], last_name=row['LastName'], account_name=account_name, mailing_country=row['MailingCountry'], mailing_street=row['MailingStreet'], mailing_city=row['MailingCity'], mailing_state_province=row['MailingState'], mailing_zip_postal_code=row['MailingPostalCode'], phone=row['Phone'], mobile=row['MobilePhone'], email=row['Email']) session.add(contact) # if in test mode only return first page of results done = results['done'] if not TEST_MODE else True if not done: results = sf.query_more(results['nextRecordsUrl'], True) logger.debug("Committing downloaded contact records") session.commit() logger.debug("finished downloading latest salesforce contacts data") src/server/api/API_ingest/ingest_sources_from_api.py METASEP from api.API_ingest import shelterluv_people, salesforce_contacts, sl_animal_events import structlog logger = structlog.get_logger() def start(): logger.debug("Start Fetching raw data from different API sources") logger.debug(" Fetching Salesforce contacts") salesforce_contacts.store_contacts_all() logger.debug(" Finished fetching Salesforce contacts") logger.debug(" Fetching Shelterluv people") slp_count = shelterluv_people.store_shelterluv_people_all() logger.debug(" Finished fetching Shelterluv people - %d records" , 
slp_count) logger.debug(" Fetching Shelterluv events") sle_count = sl_animal_events.store_all_animals_and_events() logger.debug(" Finished fetching Shelterluv events - %d records" , sle_count) logger.debug("Finished fetching raw data from different API sources") src/server/api/API_ingest/dropbox_handler.py METASEP import dropbox import structlog logger = structlog.get_logger() try: from secrets_dict import DROPBOX_APP except ImportError: # Not running locally logger.debug("Couldn't get DROPBOX_APP from file, trying environment **********") from os import environ try: DROPBOX_APP = environ['DROPBOX_APP'] except KeyError: # Not in environment # You're SOL for now logger.error("Couldn't get DROPBOX_APP from file or environment") class TransferData: def __init__(self, access_token): self.access_token = access_token def upload_file(self, file_from, file_to): dbx = dropbox.Dropbox(self.access_token) with open(file_from, 'rb') as f: dbx.files_upload(f.read(), file_to) def upload_file_to_dropbox(file_path, upload_path): access_token = DROPBOX_APP transfer_data = TransferData(access_token) file_from = file_path file_to = upload_path # The full path to upload the file to, including the file name transfer_data.upload_file(file_from, file_to) src/server/api/API_ingest/__init__.py METASEP src/server/alembic/versions/fd187937528b_create_pdp_contacts_table.py METASEP """create pdp_contacts table Revision ID: fd187937528b Revises: 57b547e9b464 Create Date: 2021-08-10 20:16:54.169168 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB import datetime # revision identifiers, used by Alembic. revision = 'fd187937528b' down_revision = '57b547e9b464' branch_labels = None depends_on = None def upgrade(): op.create_table('pdp_contacts', sa.Column('_id', sa.Integer, primary_key=True, autoincrement=True), sa.Column('matching_id', sa.Integer, primary_key=True), sa.Column('source_type', sa.String, nullable=False), sa.Column('source_id', sa.String, nullable=False), sa.Column('is_organization', sa.Boolean), sa.Column('first_name', sa.String), sa.Column('last_name', sa.String), sa.Column('email', sa.String), sa.Column('mobile', sa.String), sa.Column('street_and_number', sa.String), sa.Column('apartment', sa.String), sa.Column('city', sa.String), sa.Column('state', sa.String), sa.Column('zip', sa.String), sa.Column('json', JSONB), sa.Column('created_date', sa.DateTime, default=datetime.datetime.utcnow), sa.Column('archived_date', sa.DateTime, default=None) ) def downgrade(): op.drop_table("pdp_contacts") op.drop_table("pdp_contact_types") src/server/alembic/versions/fc7325372396_merge_heads.py METASEP """Merges heads '8f4, '28b Revision ID: fc7325372396 Revises: a3ba63dee8f4, fd187937528b Create Date: 2022-01-17 22:05:05.824901 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'fc7325372396' down_revision = ('a3ba63dee8f4', 'fd187937528b') branch_labels = None depends_on = None def upgrade(): pass def downgrade(): pass src/server/alembic/versions/f3d30db17bed_change_pdp_users_password_to_bytea.py METASEP """Change pdp_users.password to bytea Revision ID: f3d30db17bed Revises: 41da831646e4 Create Date: 2020-12-16 21:26:08.548724 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = "f3d30db17bed" down_revision = "41da831646e4" branch_labels = None depends_on = None def upgrade(): op.drop_column("pdp_users", "password") op.add_column("pdp_users", sa.Column("password", sa.LargeBinary, nullable=False)) def downgrade(): op.drop_column("pdp_users", "password") op.add_column("pdp_users", "password", sa.String(50), nullable=False), src/server/alembic/versions/e3ef522bd3d9_explicit_create_sfd.py METASEP """Explicit creation for salesforcedonations Revision ID: e3ef522bd3d9 Revises: bfb1262d3195 Create Date: 2021-06-18 21:55:56.651101 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'e3ef522bd3d9' down_revision = 'bfb1262d3195' branch_labels = None depends_on = None def upgrade(): op.create_table ( "salesforcedonations", sa.Column("_id", sa.Integer, primary_key=True), sa.Column("opp_id", sa.String(), nullable=False), sa.Column("recurring_donor", sa.Boolean, nullable=False), sa.Column("primary_contact", sa.String(), nullable=True), sa.Column("contact_id", sa.String(), nullable=False), sa.Column("amount", sa.DECIMAL, nullable=False), sa.Column("close_date", sa.Date, nullable=False), sa.Column("donation_type", sa.String(), nullable=True), sa.Column("primary_campaign_source", sa.String(),nullable=True) ) op.execute("""CREATE INDEX sfd_contact_id_idx ON public.salesforcedonations USING btree (contact_id);""" ) op.create_unique_constraint( "uq_donation", "salesforcedonations", ["opp_id", "contact_id", "close_date", "amount"] ) def downgrade(): op.drop_table("salesforcedonations") src/server/alembic/versions/d0841384d5d7_explicitly_create_vshifts.py METASEP """Explicitly create vshifts Revision ID: d0841384d5d7 Revises: e3ef522bd3d9 Create Date: 2021-07-05 22:05:52.743905 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd0841384d5d7' down_revision = 'e3ef522bd3d9' branch_labels = None depends_on = None def upgrade(): op.create_table ( "volgisticsshifts", sa.Column("_id", sa.Integer, primary_key=True), sa.Column("volg_id", sa.Integer, nullable=False), sa.Column("assignment", sa.String(), nullable=True), sa.Column("site", sa.String(), nullable=True), sa.Column("from_date", sa.Date, nullable=False), sa.Column("hours", sa.DECIMAL, nullable=False) ) op.execute("""CREATE INDEX vs_volg_id_idx ON public.volgisticsshifts USING btree (volg_id);""" ) op.create_unique_constraint( "uq_shift", "volgisticsshifts", ["volg_id", "assignment", "from_date", "hours"] ) def downgrade(): op.drop_table("volgisticsshifts") src/server/alembic/versions/bfb1262d3195_create_execution_status_table.py METASEP """create execution status table Revision ID: bfb1262d3195 Revises: 05e0693f8cbb Create Date: 2021-05-28 16:12:40.561829 """ from alembic import op import sqlalchemy as sa from sqlalchemy.sql.sqltypes import Integer from sqlalchemy.sql import func # revision identifiers, used by Alembic. 
revision = 'bfb1262d3195' down_revision = '05e0693f8cbb' branch_labels = None depends_on = None def upgrade(): op.create_table ( "execution_status", sa.Column("_id", sa.Integer, primary_key=True), sa.Column("job_id", sa.Integer, nullable=False), sa.Column("stage", sa.String(32), nullable=False), sa.Column("status", sa.String(32), nullable=False), sa.Column("details", sa.String(128), nullable=False), sa.Column("update_stamp", sa.DateTime, nullable=False, server_default=func.now()) ) op.execute("""CREATE FUNCTION last_upd_trig() RETURNS trigger LANGUAGE plpgsql AS $$BEGIN NEW.update_stamp := current_timestamp; RETURN NEW; END;$$;""") op.execute("""CREATE TRIGGER last_upd_trigger BEFORE INSERT OR UPDATE ON execution_status FOR EACH ROW EXECUTE PROCEDURE last_upd_trig();""" ) # Postgres-specific, obviously op.create_unique_constraint("uq_job_id", "execution_status", ["job_id"]) def downgrade(): op.drop_table("execution_status") op.execute("DROP FUNCTION last_upd_trig()") src/server/alembic/versions/a3ba63dee8f4_rmv_details_size_limit.py METASEP """Remove execution_status.details field size limit Revision ID: a3ba63dee8f4 Revises: 40be910424f0 Create Date: 2021-09-18 18:14:48.044985 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'a3ba63dee8f4' down_revision = '40be910424f0' branch_labels = None depends_on = None def upgrade(): op.alter_column('execution_status',"details", type_=sa.String(None) ) def downgrade(): op.alter_column('execution_status',"details", type_=sa.String(128) ) src/server/alembic/versions/9687db7928ee_shelterluv_animals.py METASEP """Create SL_animals table Revision ID: 9687db7928ee Revises: 45a668fa6325 Create Date: 2021-12-24 21:15:33.399197 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '9687db7928ee' down_revision = '45a668fa6325' branch_labels = None depends_on = None def upgrade(): op.create_table ( "shelterluv_animals", sa.Column("id", sa.BigInteger, primary_key=True), sa.Column("local_id", sa.BigInteger, nullable=False), sa.Column("name", sa.Text, nullable=False), sa.Column("type", sa.Text, nullable=False), sa.Column("dob", sa.BigInteger, nullable=False), sa.Column("update_stamp", sa.BigInteger, nullable=False), sa.Column("photo", sa.Text, nullable=False) ) def downgrade(): op.drop_table("shelterluv_animals") src/server/alembic/versions/90f471ac445c_create_sl_events.py METASEP """Shelterluv animal events table Revision ID: 90f471ac445c Revises: 9687db7928ee Create Date: 2022-09-04 17:21:51.511030 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
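# Note: sl_event_types is seeded by db_setup/base_users.py (populate_sl_event_types)
# with ids 1-5: Outcome.Adoption, Outcome.Foster, Outcome.ReturnToOwner,
# Intake.AdoptionReturn and Intake.FosterReturn; sl_animal_events.event_type
# is a foreign key into that table.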
revision = '90f471ac445c' down_revision = '9687db7928ee' branch_labels = None depends_on = None def upgrade(): op.create_table ( "sl_event_types", sa.Column("id", sa.Integer, autoincrement=True, primary_key=True), sa.Column("event_name", sa.Text, nullable=False), ) op.create_table ( "sl_animal_events", sa.Column("id", sa.Integer, autoincrement=True, primary_key=True), sa.Column("person_id", sa.Integer, nullable=False), sa.Column("animal_id", sa.Integer, nullable=False), sa.Column("event_type", sa.Integer, sa.ForeignKey('sl_event_types.id')), sa.Column("time", sa.BigInteger, nullable=False) ) op.create_index('sla_idx', 'sl_animal_events', ['person_id']) def downgrade(): op.drop_table("sl_animal_events") op.drop_table("sl_event_types") src/server/alembic/versions/783cabf889d9_inital_schema_setup.py METASEP """inital schema setup Revision ID: 783cabf889d9 Revises: Create Date: 2020-12-16 01:47:43.686881 """ from sqlalchemy.sql.expression import null from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '783cabf889d9' down_revision = None branch_labels = None depends_on = None def upgrade(): op.create_table( 'pdp_user_roles', sa.Column('_id', sa.Integer, primary_key=True), sa.Column('role', sa.String(50), nullable=False) ) op.create_table( 'pdp_users', sa.Column('_id', sa.Integer, primary_key=True), sa.Column('username', sa.String(50), nullable=False), sa.Column('role', sa.String(50), nullable=False), sa.Column('password', sa.String(50), nullable=False), sa.Column('active', sa.String(50), nullable=False), sa.Column('created', sa.DateTime,nullable=False, server_default='now()') ) def downgrade(): pass src/server/alembic/versions/72d50d531bd5_fix_pdp_users_timestamp.py METASEP """Fix pdp_users timestamp Revision ID: 72d50d531bd5 Revises: 783cabf889d9 Create Date: 2020-12-16 15:22:54.734670 """ from alembic import op import sqlalchemy as sa from sqlalchemy.sql import func # revision identifiers, used by Alembic. revision = "72d50d531bd5" down_revision = "783cabf889d9" branch_labels = None depends_on = None def upgrade(): op.drop_column("pdp_users", "created") op.add_column( "pdp_users", sa.Column("created", sa.DateTime, nullable=False, server_default=func.now()), ) def downgrade(): sa.Column("created", sa.DateTime, nullable=False, server_default="now()") src/server/alembic/versions/7138d52f92d6_add_uniqueness_constraints.py METASEP """add uniqueness constraints Revision ID: 7138d52f92d6 Revises: f3d30db17bed Create Date: 2020-12-17 17:31:29.154789 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "7138d52f92d6" down_revision = "f3d30db17bed" branch_labels = None depends_on = None def upgrade(): op.create_unique_constraint("uq_username", "pdp_users", ["username"]) op.create_unique_constraint("uq_role", "pdp_user_roles", ["role"]) def downgrade(): pass src/server/alembic/versions/6b8cf99be000_add_user_journal_table.py METASEP """Add user journal table Revision ID: 6b8cf99be000 Revises: 36c4ecbfd11a Create Date: 2020-12-21 15:08:07.784568 """ from alembic import op import sqlalchemy as sa from sqlalchemy.sql import func # revision identifiers, used by Alembic. 
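# Note: pdp_user_journal is the audit trail written by api/user_api.py's
# log_user_action(username, event_type, detail), e.g. ('base_admin', 'Success', 'Logged in').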
revision = "6b8cf99be000" down_revision = "36c4ecbfd11a" branch_labels = None depends_on = None def upgrade(): op.create_table( "pdp_user_journal", sa.Column("_id", sa.Integer, primary_key=True), sa.Column("stamp", sa.DateTime, nullable=False, server_default=func.now()), sa.Column("username", sa.String(50), nullable=False), sa.Column("event_type", sa.String(50)), sa.Column("detail", sa.String(120)), ) def downgrade(): op.drop_table('pdp_user_journal') src/server/alembic/versions/57b547e9b464_create_rfm_edges_table.py METASEP """Create RFM edges table Revision ID: 57b547e9b464 Revises: 494e064d69a3 Create Date: 2021-07-20 21:39:00.438116 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '57b547e9b464' down_revision = '494e064d69a3' branch_labels = None depends_on = None def upgrade(): op.create_table ( "rfm_edges", sa.Column("component", sa.String(), primary_key=True), sa.Column("edge_string", sa.String(), nullable=False) ) def downgrade(): op.drop_table("rfm_edges") src/server/alembic/versions/494e064d69a3_tables_for_rfm_data.py METASEP """Tables for RFM data Revision ID: 494e064d69a3 Revises: d0841384d5d7 Create Date: 2021-07-20 19:45:29.418756 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '494e064d69a3' down_revision = 'd0841384d5d7' branch_labels = None depends_on = None def upgrade(): op.create_table ( "rfm_scores", sa.Column("matching_id", sa.Integer, primary_key=True), sa.Column("rfm_score", sa.String(3), nullable=False) ) op.create_table ( "rfm_mapping", sa.Column("rfm_value", sa.String(3), primary_key=True), sa.Column("rfm_label", sa.String(), nullable=True), sa.Column("rfm_color", sa.String(), nullable=True, default='0xe0e0e0') ) def downgrade(): op.drop_table("rfm_scores") op.drop_table("rfm_mapping") src/server/alembic/versions/45a668fa6325_postgres_matching.py METASEP """postgres matching Revision ID: 45a668fa6325 Revises: fc7325372396 Create Date: 2022-02-10 16:19:13.283250 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '45a668fa6325' down_revision = 'fc7325372396' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! 
### op.create_table('manual_matches', sa.Column('source_type_1', sa.String(), nullable=False), sa.Column('source_id_1', sa.String(), nullable=False), sa.Column('source_type_2', sa.String(), nullable=False), sa.Column('source_id_2', sa.String(), nullable=False), sa.PrimaryKeyConstraint('source_type_1', 'source_id_1', 'source_type_2', 'source_id_2') ) op.create_table('salesforcecontacts', sa.Column('_id', sa.Integer(), nullable=False), sa.Column('contact_id', sa.String(), nullable=True), sa.Column('first_name', sa.String(), nullable=True), sa.Column('last_name', sa.String(), nullable=True), sa.Column('account_name', sa.String(), nullable=True), sa.Column('mailing_country', sa.String(), nullable=True), sa.Column('mailing_street', sa.String(), nullable=True), sa.Column('mailing_city', sa.String(), nullable=True), sa.Column('mailing_state_province', sa.String(), nullable=True), sa.Column('mailing_zip_postal_code', sa.String(), nullable=True), sa.Column('phone', sa.String(), nullable=True), sa.Column('mobile', sa.String(), nullable=True), sa.Column('email', sa.String(), nullable=True), sa.Column('json', postgresql.JSONB(astext_type=sa.Text()), nullable=True), sa.Column('created_date', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('_id') ) op.create_table('shelterluvpeople', sa.Column('_id', sa.Integer(), nullable=False), sa.Column('firstname', sa.String(), nullable=True), sa.Column('lastname', sa.String(), nullable=True), sa.Column('id', sa.String(), nullable=True), sa.Column('internal_id', sa.String(), nullable=True), sa.Column('associated', sa.String(), nullable=True), sa.Column('street', sa.String(), nullable=True), sa.Column('apartment', sa.String(), nullable=True), sa.Column('city', sa.String(), nullable=True), sa.Column('state', sa.String(), nullable=True), sa.Column('zip', sa.String(), nullable=True), sa.Column('email', sa.String(), nullable=True), sa.Column('phone', sa.String(), nullable=True), sa.Column('animal_ids', postgresql.JSONB(astext_type=sa.Text()), nullable=True), sa.Column('json', postgresql.JSONB(astext_type=sa.Text()), nullable=True), sa.Column('created_date', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('_id') ) op.create_table('volgistics', sa.Column('_id', sa.Integer(), nullable=False), sa.Column('number', sa.String(), nullable=True), sa.Column('last_name', sa.String(), nullable=True), sa.Column('first_name', sa.String(), nullable=True), sa.Column('middle_name', sa.String(), nullable=True), sa.Column('complete_address', sa.String(), nullable=True), sa.Column('street_1', sa.String(), nullable=True), sa.Column('street_2', sa.String(), nullable=True), sa.Column('street_3', sa.String(), nullable=True), sa.Column('city', sa.String(), nullable=True), sa.Column('state', sa.String(), nullable=True), sa.Column('zip', sa.String(), nullable=True), sa.Column('all_phone_numbers', sa.String(), nullable=True), sa.Column('home', sa.String(), nullable=True), sa.Column('work', sa.String(), nullable=True), sa.Column('cell', sa.String(), nullable=True), sa.Column('email', sa.String(), nullable=True), sa.Column('json', postgresql.JSONB(astext_type=sa.Text()), nullable=True), sa.Column('created_date', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('_id') ) op.create_index('idx_pdp_contacts_source_type_and_id', 'pdp_contacts', ['source_type', 'source_id'], unique=False) op.create_index(op.f('ix_pdp_contacts_mobile'), 'pdp_contacts', ['mobile'], unique=False) op.create_index(op.f('idx_pdp_contacts_lower_first_name'), 'pdp_contacts', [sa.text('lower(first_name)')], 
unique=False) op.create_index(op.f('idx_pdp_contacts_lower_last_name'), 'pdp_contacts', [sa.text('lower(last_name)')], unique=False) op.create_index(op.f('idx_pdp_contacts_lower_email'), 'pdp_contacts', [sa.text('lower(email)')], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_pdp_contacts_lower_email'), table_name='pdp_contacts') op.drop_index(op.f('ix_pdp_contacts_lower_last_name'), table_name='pdp_contacts') op.drop_index(op.f('ix_pdp_contacts_lower_first_name'), table_name='pdp_contacts') op.drop_index(op.f('ix_pdp_contacts_mobile'), table_name='pdp_contacts') op.drop_index('idx_pdp_contacts_source_type_and_id', table_name='pdp_contacts') op.drop_table('volgistics') op.drop_table('shelterluvpeople') op.drop_table('salesforcecontacts') op.drop_table('manual_matches') # ### end Alembic commands ### src/server/alembic/versions/41da831646e4_pdp_users_role_fk_from_roles.py METASEP """pdp_users.role FK from roles Revision ID: 41da831646e4 Revises: 72d50d531bd5 Create Date: 2020-12-16 15:53:28.514053 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "41da831646e4" down_revision = "72d50d531bd5" branch_labels = None depends_on = None def upgrade(): op.drop_column("pdp_users", "role") op.add_column( "pdp_users", sa.Column("role", sa.Integer, sa.ForeignKey("pdp_user_roles._id")) ) def downgrade(): pass src/server/alembic/versions/40be910424f0_update_rfm_mapping_remove_rfm_edges.py METASEP """Update rfm_mapping, remove rfm_edges Revision ID: 40be910424f0 Revises: 57b547e9b464 Create Date: 2021-08-08 17:26:40.622536 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '40be910424f0' down_revision = '57b547e9b464' branch_labels = None depends_on = None def upgrade(): op.drop_table("rfm_edges") # Unneeded, unused op.add_column('rfm_mapping', sa.Column('rfm_text_color', sa.String()) ) def downgrade(): op.create_table ( "rfm_edges", sa.Column("component", sa.String(), primary_key=True), sa.Column("edge_string", sa.String(), nullable=False) ) src/server/alembic/versions/36c4ecbfd11a_add_pdp_users_full_name.py METASEP """Add pdp_users full_name Revision ID: 36c4ecbfd11a Revises: 7138d52f92d6 Create Date: 2020-12-18 15:28:17.367718 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "36c4ecbfd11a" down_revision = "7138d52f92d6" branch_labels = None depends_on = None def upgrade(): op.add_column("pdp_users", sa.Column("full_name", sa.String)) def downgrade(): op.drop_column("pdp_users", "full_name") src/server/alembic/versions/05e0693f8cbb_key_value_table.py METASEP """key/value table Revision ID: 05e0693f8cbb Revises: 6b8cf99be000 Create Date: 2021-03-18 11:35:43.512082 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '05e0693f8cbb' down_revision = '6b8cf99be000' branch_labels = None depends_on = None def upgrade(): op.create_table( 'kv_unique', sa.Column('_id', sa.Integer, primary_key=True), sa.Column('keycol', sa.String(50), nullable=False, unique=True), sa.Column('valcol', sa.String(65536), nullable=True), ) # op.create_index('kvk_ix', 'kv_unique', ['key'], unique=True) def downgrade(): op.drop_table('kv_unique') src/server/rfm_funcs/test_rfm.py METASEP # This function is meant to test the RFM create_scores.py function. ''' Things needed 1. Create mock data a. Mock data must be realistic b. 
Mock data must have 5^3 possibilities for RFM score, i.e., 1 RFM score each.
    c. Therefore we need 125 unique rows.
    d. Recency needs to have at least 5 different dates
    e. Frequency needs to have at least 5 different IDs
    f. Monetary needs to have at least 5 different amounts
    g. Each subject ID will get an RFM score.
2. create_scores.py will accept this mock data and then generate a new RFM score
3. The final step of this function will perform a Jaccard similarity analysis to determine whether the vectors match, where the result should be exactly 1.0
'''
src/server/rfm_funcs/rfm_functions.py METASEP
# rfm_funcs
### A number of RFM functions which are called by the main create_scores function.

# def date_difference(my_date, query_date):
#     '''
#     This function takes in a single date from the donations dataframe (per row) and compares the difference between that date and the date in which matching occurs.
#     I.e. pipeline matching should provide a query_date so that this can work.
#     '''
#     from datetime import datetime, date
#     d1 = datetime.strptime(str(my_date), "%Y-%m-%d")
#     d2 = datetime.strptime(str(query_date), "%Y-%m-%d")
#     diff = (d2 - d1)
#     return diff


def rfm_concat(days_score, frequency_score, amount_score):
    '''
    This function takes in three pandas Series of scores and returns a concatenated version of each score for a total RFM score.
    Assumes arg1 is the Recency score, arg2 the Frequency score and arg3 the Monetary score.

    arg1: pandas.Series
    arg2: pandas.Series
    arg3: pandas.Series
    '''
    def concat(a, b, c):
        return int(f"{a}{b}{c}")

    rfm_score = list()
    for ii, jj, kk in zip(days_score, frequency_score, amount_score):
        rfm_score.append(concat(ii, jj, kk))

    return rfm_score


def merge_series(list1, list2):
    '''
    This function takes in two sequences and merges them into a tuple of paired tuples.
    '''
    merged_list = tuple(zip(list(list1), list(list2)))
    return merged_list


def create_bins_dict(recency_edges, frequency_edges, monetary_edges):
    '''
    create_bins_dict -- creates dictionaries for each edge and label pairing.

    This function takes in user-defined bin edges and the respective labels for each bin edge.
    The user should input the edges and labels in corresponding order; a set of edges and labels
    for each score should be entered, e.g. recency_edges = np.array([0, 1., 2., 4., 10.])
    '''
    recency_dict = {}
    recency_labels = [5, 4, 3, 2, 1]
    for ii, jj in zip(recency_labels, recency_edges):
        recency_dict["{0}".format(ii)] = jj

    frequency_dict = {}
    frequency_labels = [1, 2, 3, 4, 5]
    for tt, kk in zip(frequency_labels, frequency_edges):
        frequency_dict["{0}".format(tt)] = kk

    monetary_dict = {}
    monetary_labels = [1, 2, 3, 4, 5]
    for ww, hh in zip(monetary_labels, monetary_edges):
        monetary_dict["{0}".format(ww)] = hh

    return recency_dict, frequency_dict, monetary_dict
src/server/rfm_funcs/create_scores.py METASEP
from config import engine
from flask import current_app
import traceback
import pandas as pd
import numpy as np
from datetime import datetime, date
from collections import Counter
import dateutil
import structlog
logger = structlog.get_logger()


def date_difference(my_date, max_date):
    '''
    This function takes in a single date from the donations dataframe (per row) and compares the difference between that date and the date in which matching occurs.
    I.e. pipeline matching should provide a query_date so that this can work.
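    For example (illustrative values only): date_difference("2021-01-01", "2021-03-01") returns a timedelta of 59 days.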
''' d1 = datetime.strptime(str(my_date), "%Y-%m-%d") d2 = datetime.strptime(str(max_date), "%Y-%m-%d") diff = (d2 - d1) return diff def create_scores(): ''' (used to) require query date as input-- must be string in the following format "%Y-%m-%d" returns a list of matching_ids and scores as tuples will also insert rfm scores into rfm_scores table----see src/server/api/admin_api.py ''' # We calculate query_date below in frequncy with engine.connect() as connection: logger.debug("running create_scores()") # read in data from database via pull_donations_for_rfm() func (reads in as a list of tuples) df = pd.read_sql( """ select pc.matching_id, s.amount, s.close_date from salesforcedonations s inner join pdp_contacts pc on pc.source_id = s.contact_id and pc.source_type = 'salesforcecontacts' where pc.archived_date is null order by matching_id """ , connection) df = pd.DataFrame(df, columns=['matching_id', 'amount', 'close_date']) from api.admin_api import read_rfm_edges, insert_rfm_scores # Avoid circular import issues rfm_dict = read_rfm_edges() if len(rfm_dict) == 3: # r,f,m try: recency_labels = [5,4,3,2,1] recency_bins = list(rfm_dict['r'].values()) #imported from table frequency_labels = [1,2,3,4,5] frequency_bins = list(rfm_dict['f'].values()) #imported from table monetary_labels = [1,2,3,4,5] monetary_bins = list(rfm_dict['m'].values()) #imported from table ########################## recency ######################################### donations_past_year = df donations_past_year['close_date'] =pd.to_datetime(donations_past_year['close_date']).dt.date # calculate date difference between input date and individual row close date days = [] max_close_date = donations_past_year['close_date'].max() for ii in donations_past_year['close_date']: days.append(date_difference(ii, max_close_date)) donations_past_year['days_since'] = days grouped_past_year = donations_past_year.groupby('matching_id').agg({'days_since': ['min']}).reset_index() logger.debug(grouped_past_year.head()) grouped_past_year[('days_since', 'min')]= grouped_past_year[('days_since', 'min')].dt.days max_maybe = grouped_past_year[('days_since', 'min')].max() real_max = max(max_maybe, max(recency_bins)+1 ) recency_bins.append(real_max) grouped_past_year['recency_score'] = pd.cut(grouped_past_year[('days_since','min')], bins= recency_bins, labels=recency_labels, include_lowest = True) grouped_past_year.rename(columns={('recency_score', ''): 'recency_score'}) ################################## frequency ############################### query_date = df['close_date'].max() df['close_date'] = pd.DatetimeIndex(df['close_date']) df_grouped = df.groupby(['matching_id', pd.Grouper(key = 'close_date', freq = 'Q')]).count().max(level=0) df_freq = df.loc[df['close_date'] > pd.Timestamp(query_date) - pd.Timedelta( "365 days") ] #pd.DatetimeIndex(df['close_date'] - pd.Timedelta( "30 days") ) df_grouped = df_freq.groupby(['matching_id']).count() df_grouped = df_grouped.reset_index() frequency_bins.append(np.inf) df_frequency = df_grouped[['matching_id' , 'amount']] # amount is a placeholder as the groupby step just gives a frequency count, the value doesn't correspond to donation monetary amount. 
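                # Illustrative example of the scoring below (hypothetical edges -- the real ones
                # come from the rfm_edges table): with frequency_bins = [0, 1, 2, 4, 8, inf] and
                # labels [1, 2, 3, 4, 5], a donor with 3 donations in the last 365 days falls in
                # the (2, 4] bin and gets frequency_score 3.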
df_frequency = df_frequency.rename(columns = {'amount':'frequency'}) #renaming amount to frequency df_frequency['frequency_score'] = pd.cut(df_frequency['frequency'], bins = frequency_bins, labels=frequency_labels, include_lowest=False) ################################## amount ################################## # Need to score people with R, M but not F as a 1 monetary_bins.append(np.inf) df_amount = df.groupby(df['matching_id'], as_index=False).amount.max() df_amount['amount_score'] = pd.cut(df_amount['amount'], bins= monetary_bins, include_lowest=True, labels = monetary_labels) # raise ValueError # Just to test exception handling # Concatenate rfm scores # merge monetary df and frequency df df_semi = df_amount.merge(df_frequency, left_on='matching_id', right_on= 'matching_id', how='left') logger.debug(grouped_past_year.head()) logger.debug(df_semi.head()) df_semi['frequency_score'] = df_semi['frequency_score'].fillna(1) df_final = df_semi.merge(grouped_past_year, left_on='matching_id', right_on= 'matching_id', how='left') # merge monetary/frequency dfs to recency df # import function: rfm_concat, which will catenate integers as a string and then convert back to a single integer from rfm_funcs.rfm_functions import rfm_concat rfm_score = rfm_concat(df_final[('recency_score'), ''], df_final['frequency_score'], df_final['amount_score']) # Append rfm score to final df df_final['rfm_score'] = rfm_score from rfm_funcs.rfm_functions import merge_series score_tuples = merge_series((df_final['matching_id']), df_final['rfm_score']) except Exception as e: logger.error(e) trace_back_string = traceback.format_exc() logger.error(trace_back_string) return 0 try: insert_rfm_scores(score_tuples) except Exception as e: logger.error(e) trace_back_string = traceback.format_exc() logger.error(trace_back_string) return 0 return len(score_tuples) # Not sure there's anything to do with them at this point else: # Didn't get len == 3 logger.error("rfm_edges missing from DB or malformed. 
Could not perform rfm scoring") return 0 src/server/rfm_funcs/create_bins.py METASEP def create_bins(data, query_date): '''This script will take table data and bin edges for RFM scores for all PAWS donations query_date = date data was queried ''' import pandas as pd import numpy as np import jenkspy from datetime import datetime, date import os #### # read in data from database as list of tuples df = pull_donations_for_rfm() df = pd.DataFrame(df, columns=['matching_id', 'amount', 'close_date']) donations_df['Close_Date'] =pd.to_datetime(df['Close_Date']).dt.date ################################################################################## # Calculate recency bins from recency_bins import recency_bins recency_bins, quantile_scores= recency_bins(donations_df, query_date) ################################################################################### # Calculate frequency bins from frequency_bins import frequency_bins jenks_frequency_bins, human_frequency_bins = frequency_bins(donations_df) def checkIfDuplicates(listOfElems): ''' Check if given list contains any duplicates ''' for elem in listOfElems: if listOfElems.count(elem) > 1: return True return False duplicats_bool = checkIfDuplicates(jenks_frequency_bins) if duplicates_bool == True: final_frequency_bins = human_frequency_bins ################################################################################### # Calculate Amount bins from amount_bins import amount_bins amount_jenks_bins, human_amount_bins = amount_bins(donations_df) ################################################################################### # Write bins to dict bins_dict = {} src/server/rfm_funcs/__init__.py METASEP src/server/pipeline/log_db.py METASEP from datetime import datetime import json from sqlalchemy.sql import text from flask import current_app from sqlalchemy.dialects.postgresql import insert from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, exc, select from config import engine import structlog logger = structlog.get_logger() metadata = MetaData() ex_stat = Table("execution_status", metadata, autoload=True, autoload_with=engine) # Alembic version bfb1262d3195 # CREATE TABLE public.execution_status ( # "_id" serial NOT NULL, # job_id int4 NOT NULL, # stage varchar(32) NOT NULL, # status varchar(32) NOT NULL, # details varchar(128) NOT NULL, # update_stamp timestamp NOT NULL DEFAULT now(), # CONSTRAINT execution_status_pkey null # ); def log_exec_status(job_id: str, exec_stage: str, exec_status: str, job_details: str): """Log execution status (job_id, status, job_details) to DB """ with engine.connect() as connection: ins_stmt = insert(ex_stat).values( # Postgres-specific insert() supporting ON CONFLICT job_id = job_id, stage = exec_stage, status = exec_status, details = json.dumps(job_details) ) # If key already present in DB, do update instead upsert = ins_stmt.on_conflict_do_update( constraint='uq_job_id', set_=dict( stage = exec_stage, status = exec_status, details = json.dumps(job_details)) ) try: connection.execute(upsert) except Exception as e: logger.error("Insert/Update failed, Execution status") logger.error(e) src/server/pipeline/flow_script.py METASEP import time import traceback from api import admin_api from config import engine from flask import current_app from models import ( ManualMatches, PdpContacts, SalesForceContacts, ShelterluvPeople, Volgistics, ) from networkx import Graph, connected_components from sqlalchemy import ( Column, Integer, MetaData, Table, and_, delete, func, insert, or_, 
select, text, update, ) from pipeline import log_db import structlog logger = structlog.get_logger() def start_flow(): start = time.time() job_id = admin_api.start_job() job_outcome = None trace_back_string = None if not job_id: logger.info("Failed to get job_id") return "busy" try: log_db.log_exec_status(job_id, "start_flow", "executing", "") with engine.begin() as conn: # Here's how we match: # 1. Clear pdp_contacts (the old matches). # 2. Go through each raw data source table (e.g. salesforcecontacts, # volgistics) and copy the latest data for each contact into # pdp_contacts. # 3. Execute a join of pdp_contacts to itself using names, emails, # phone numbers, etc. to get a list of pairs of pdp_contacts ids # that "match." # 4. Join manual_matches to pdp_contacts to get the pdp_contacts ids # of our manual matches. # # Steps 3 and 4 both produce lists of pairs of ids. Next we need to # associate an id with each group of matches. Note that if A matches # B and B matches C, then A and C should get the same match id. We # can thus think of "matches" as edges in a graph of id vertices, # and match groups as connected components in that graph. So: # # 5. Load all the matches into a Graph() and compute its connected # components. # 6. Update each row in pdp_contacts to give it a match id # corresponding to its connected componenet. logger.debug("Clearing pdp_contacts to prepare for match") reset_pdp_contacts_with_unmatched(conn) logger.debug("Computing automatic matches") automatic_matches = get_automatic_matches(conn) logger.debug("Computing manual matches") manual_matches = get_manual_matches(conn) match_graph = Graph() logger.debug("Adding automatic matches to graph") match_graph.add_edges_from(automatic_matches) logger.debug("Adding manual matches to graph") match_graph.add_edges_from(manual_matches) logger.debug("Processing graph") match_groups = connected_components(match_graph) logger.debug("Updating pdp_contacts with match ids") update_matching_ids(match_groups, conn) logger.debug("Finished flow script run") job_outcome = "completed" log_db.log_exec_status(job_id, "flow", "complete", "") except Exception as e: logger.error(e) trace_back_string = traceback.format_exc() logger.error(trace_back_string) finally: if job_outcome != "completed": log_db.log_exec_status(job_id, "flow", "error", trace_back_string) logger.error( "Uncaught error status, setting job status to 'error' " ) job_outcome = "error" return "error" logger.info( "Pipeline execution took %s seconds ", format(time.time() - start) ) return job_outcome def reset_pdp_contacts_with_unmatched(conn): conn.execute(delete(PdpContacts)) conn.execute(SalesForceContacts.insert_into_pdp_contacts()) conn.execute(Volgistics.insert_into_pdp_contacts()) conn.execute(ShelterluvPeople.insert_into_pdp_contacts()) def name_to_array(n): delims = text("'( and | & |, | )'") return func.regexp_split_to_array( func.lower(func.translate(n, text("'\"'"), text("''"))), delims ) def compare_names(n1, n2): return name_to_array(n1).bool_op("&&")(name_to_array(n2)) def get_automatic_matches(conn): pc1 = PdpContacts.__table__.alias() pc2 = PdpContacts.__table__.alias() match_stmt = select(pc1.c._id, pc2.c._id).join( pc2, and_( or_( and_( compare_names(pc1.c.first_name, pc2.c.first_name), compare_names(pc1.c.last_name, pc2.c.last_name), ), and_( compare_names(pc1.c.first_name, pc2.c.last_name), compare_names(pc1.c.last_name, pc2.c.first_name), ), ), or_( func.lower(pc1.c.email) == func.lower(pc2.c.email), pc1.c.mobile == pc2.c.mobile, ), # This ensures we 
don't get e.g. every row matching itself pc1.c._id < pc2.c._id, ), ) return conn.execute(match_stmt) def get_manual_matches(conn): pc1 = PdpContacts.__table__.alias() pc2 = PdpContacts.__table__.alias() stmt = ( select(pc1.c._id, pc2.c._id) .select_from(ManualMatches) .join( pc1, (ManualMatches.source_type_1 == pc1.c.source_type) & (ManualMatches.source_id_1 == pc1.c.source_id), ) .join( pc2, (ManualMatches.source_type_2 == pc2.c.source_type) & (ManualMatches.source_id_2 == pc2.c.source_id), ) ) return conn.execute(stmt) def update_matching_ids(match_groups, conn): # match_groups doesn't include singletons, but we should still each # unmatched record gets a sane matching_id (that is, its own id) matching_ids_by_id = {id: id for (id,) in conn.execute(select(PdpContacts._id))} for match_group in match_groups: matching_id = min(match_group) for id in match_group: matching_ids_by_id[id] = matching_id # Load all the new id/matching-id pairs into a temp table so that we can do # a fast UPDATE FROM to set all the matching ids in pdp_contacts temp_table = Table( "_tmp_matching_id_update", MetaData(), # this is a temp table, we don't want to affect our knowledge of "real" tables Column("_id", Integer, primary_key=True), Column("matching_id", Integer), prefixes=["TEMPORARY"], postgresql_on_commit="DROP", ) temp_table.create(conn) conn.execute( insert(temp_table), [ {"_id": _id, "matching_id": matching_id} for (_id, matching_id) in matching_ids_by_id.items() ], ) conn.execute( update(PdpContacts) .where(PdpContacts._id == temp_table.c._id) .values(matching_id=temp_table.c.matching_id) ) src/server/pipeline/__init__.py METASEP src/server/db_setup/base_users.py METASEP from config import engine from api import user_api import sqlalchemy as sa import os import structlog logger = structlog.get_logger() try: from secrets_dict import BASEUSER_PW, BASEEDITOR_PW, BASEADMIN_PW except ImportError: # Not running locally logger.debug("Couldn't get BASE user PWs from file, trying environment **********") from os import environ try: BASEUSER_PW = environ['BASEUSER_PW'] BASEEDITOR_PW = environ['BASEEDITOR_PW'] BASEADMIN_PW = environ['BASEADMIN_PW'] except KeyError: # Nor in environment # You're SOL for now logger.error("Couldn't get secrets from file or environment") from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey metadata = sa.MetaData() def create_base_roles(): with engine.connect() as connection: result = connection.execute("select role from pdp_user_roles") role_count = len(result.fetchall()) if role_count == 0: connection.execute("INSERT into pdp_user_roles values (1, 'user') ") connection.execute("INSERT into pdp_user_roles values (2, 'editor') ") connection.execute("INSERT into pdp_user_roles values (9, 'admin') ") else: logger.debug("%d roles already present in DB, not creating", role_count) def create_base_users(): # TODO: Just call create_user for each """ Creates three users (user, editor, admin) for testing Password for each is user name with 'pw' appended """ with engine.connect() as connection: result = connection.execute("select user from pdp_users") user_count = len(result.fetchall()) if user_count == 0: logger.debug("Creating base users") pu = sa.Table("pdp_users", metadata, autoload=True, autoload_with=engine) # user pw_hash = user_api.hash_password(BASEUSER_PW) ins_stmt = pu.insert().values( username="base_user", full_name="Base User", password=pw_hash, active="Y", role=1, ) connection.execute(ins_stmt) # INactive user # Reuse pw hash ins_stmt = pu.insert().values( 
username="base_user_inact", full_name="Inactive User", password=pw_hash, active="N", role=1, ) connection.execute(ins_stmt) # editor pw_hash = user_api.hash_password(BASEEDITOR_PW) ins_stmt = pu.insert().values( username="base_editor", full_name="Base Editor", password=pw_hash, active="Y", role=2, ) connection.execute(ins_stmt) # admin pw_hash = user_api.hash_password(BASEADMIN_PW) ins_stmt = pu.insert().values( username="base_admin", full_name="Base Admin", password=pw_hash, active="Y", role=9, ) connection.execute(ins_stmt) else: logger.debug("%d users already present in DB, not creating", user_count) def populate_rfm_mapping_table(overwrite=False): """Populate the rfm_mapping table if empty or overwrite is True.""" with engine.connect() as connection: def table_empty(): result = connection.execute("select count(*) from rfm_mapping;") row_count = result.fetchone()[0] return row_count == 0 if overwrite or table_empty(): logger.debug("Populating rfm_mapping table") if not table_empty(): logger.debug("'overwrite=True', truncating rfm_mapping table") connection.execute("TRUNCATE TABLE rfm_mapping;") if os.path.exists('server'): # running locally file_path = os.path.normpath('server/alembic/populate_rfm_mapping.sql') elif os.path.exists('alembic'): # running on Docker file_path = os.path.normpath('alembic/populate_rfm_mapping.sql') else: # logger.error("ERROR: Can't find a path to populate script!!!!!! CWD is %s", os.getcwd()) return logger.debug("Loading sql script at " + file_path) f = open(file_path) populate_query = f.read() f.close() result = connection.execute(populate_query) if table_empty(): logger.error("ERROR: rfm_mapping table WAS NOT POPULATED") else: logger.debug("rfm_mapping table already populated; overwrite not True so not changing.") return def populate_sl_event_types(): """If not present, insert values for shelterluv animal event types.""" with engine.connect() as connection: result = connection.execute("select id from sl_event_types") type_count = len(result.fetchall()) if type_count == 0: print("Inserting SL event types") connection.execute("""INSERT into sl_event_types values (1, 'Outcome.Adoption'), (2, 'Outcome.Foster'), (3, 'Outcome.ReturnToOwner'), (4, 'Intake.AdoptionReturn'), (5, 'Intake.FosterReturn'); """) else: logger.debug("%d event types already present in DB, not creating", type_count) src/server/db_setup/__init__.py METASEP src/server/api/user_api.py METASEP from hashlib import pbkdf2_hmac from os import urandom, environ import pytest, codecs, random from datetime import datetime from api.api import user_api from sqlalchemy.sql import text from config import engine from flask import request, redirect, jsonify, current_app, abort, json from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, exc, select from api import jwt_ops import structlog logger = structlog.get_logger() metadata = MetaData() # Salt for hashing storing passwords SALT_LENGTH = 32 # Keep a journal of user activity def log_user_action(user, event_class, detail): """ Write log entry to db """ puj = Table("pdp_user_journal", metadata, autoload=True, autoload_with=engine) with engine.connect() as connection: ins_stmt = puj.insert().values(username=user, event_type=event_class, detail=detail) try: connection.execute(ins_stmt) except Exception as e: logger.error(e) def password_is_strong(password): """ Check plain-text password against strength rules.""" def has_digit(test_string): """Test if any character is a digit.""" for c in test_string: if c.isdigit(): return True return 
False def has_alpha(test_string): """Test if any character is alphabetic.""" for c in test_string: if c.isalpha(): return True return False if (len(password) > 11 # and has_alpha(password) # and has_digit(password) ): return True else: return False def hash_password(password): """ Generate salt+hash for storing in db""" salt = urandom(SALT_LENGTH) hash = pbkdf2_hmac("sha512", bytes(password, "utf8"), salt, 500000) hash_for_db = salt + hash return hash_for_db def check_password(password, salty_hash): """Check presented cleartext password against DB-type salt+hash, return True if they match""" salt = salty_hash[0:SALT_LENGTH] hash = salty_hash[SALT_LENGTH:] # Use salt from db to hash what user gave us pw_bytes = bytes(password, "utf8") hash_of_presented = pbkdf2_hmac("sha512", pw_bytes, salt, 500000) return hash.hex() == hash_of_presented.hex() ### No authorization required ############################ @user_api.route("/api/user/test", methods=["GET"]) def user_test(): """ Liveness test, does not require JWT """ logger.debug("/api/user/test") return jsonify(("OK from User Test @ " + str(datetime.now()))) @user_api.route("/api/user/test_log", methods=["GET"]) def user_test_log_error(): """Does not require JWT - see various log levels""" logger.debug("debug: /api/user/test_log_error") logger.info("info: /api/user/test_log_error") logger.warn("warn: /api/user/test_log_error") logger.error("error: /api/user/test_log_error") logger.critical("critical: /api/user/test_log_error") return jsonify(("Generated log entries as various levals @ " + str(datetime.now()))) @user_api.route("/api/user/test_fail", methods=["GET"]) def user_test_fail(): """ Liveness test, always fails with 401""" return jsonify("Here's your failure"), 401 @user_api.route("/api/user/timeout/<int:new_timeout>", methods=["GET"]) def user_override_timeout(new_timeout): """ Override JWT expiration setting for testing. Allows a value up to JWT_MAX_TIMEOUT (from app.py). This will affect, of course, only future tokens. """ if (new_timeout > current_app.config["JWT_MAX_TIMEOUT"] ) : new_timeout = current_app.config["JWT_MAX_TIMEOUT"] current_app.config["JWT_ACCESS_TOKEN_EXPIRES"] = new_timeout return jsonify("Timeout set to " + str(new_timeout) + " seconds"), 200 @user_api.route("/api/user/login", methods=["POST"]) def user_login(): """ Validate user in db, return JWT if legit and active. Expects json-encoded form data {"username" :, "password": } """ def dummy_check(): """Perform a fake password hash check to take as much time as a real one.""" pw_bytes = bytes('password', "utf8") check_password('password', pw_bytes) try: post_dict = json.loads(request.data) username = post_dict["username"] presentedpw = post_dict["password"] except: dummy_check() # Take the same time as with well-formed requests return jsonify("Bad credentials"), 401 if not (isinstance(username, str) and isinstance(presentedpw, str) ): dummy_check() # Take the same time as with well-formed requests return jsonify("Bad credentials"), 401 # Don't give us ints, arrays, etc. with engine.connect() as connection: pwhash = None s = text( """select password, pdp_user_roles.role, active from pdp_users left join pdp_user_roles on pdp_users.role = pdp_user_roles._id where username=:u """ ) s = s.bindparams(u=username) result = connection.execute(s) if result.rowcount: # Did we get a match on username? 
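            # The row unpacked below carries, in order: the salted PBKDF2 hash written by
            # hash_password(), the role name resolved through the pdp_user_roles join
            # (e.g. 'admin'), and the 'Y'/'N' active flag checked before a token is issued.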
pwhash, role, is_active = result.fetchone() else: log_user_action(username, "Failure", "Invalid username") dummy_check() return jsonify("Bad credentials"), 401 if is_active.lower() == "y" and check_password(presentedpw, pwhash): # Yes, user is active and password matches token = jwt_ops.create_token(username, role) log_user_action(username, "Success", "Logged in") return token else: log_user_action(username, "Failure", "Bad password or inactive") # No dummy_check needed as we ran a real one to get here return jsonify("Bad credentials"), 401 ### Unexpired JWT required ############################ @user_api.route("/api/user/test_auth", methods=["GET"]) @jwt_ops.jwt_required() def user_test_auth(): """ Liveness test, requires JWT """ sysname = '?' # Ensure we are talking to the expected host try: sysname = environ['computername'] except: pass try: sysname = environ['HOSTNAME'] except: pass return jsonify(("OK from User Test - Auth [" + sysname + "] @" + str(datetime.now()))) # Logout is not strictly needed; client can just delete JWT, but good for logging @user_api.route("/api/user/logout", methods=["POST"]) @jwt_ops.jwt_required() def user_logout(): user_name = '' old_jwt = jwt_ops.validate_decode_jwt() # If token bad, should be handled & error message sent by jwt_required() and we won't get here if old_jwt: user_name = old_jwt['sub'] # Log the request log_user_action(user_name, "Success", "Logged out") return jsonify("Logged out") # Generate a new access token @user_api.route("/api/user/refresh", methods=["GET"]) @jwt_ops.jwt_required() def user_refresh(): """ If user still active, send back an access_token with a new expiration stamp """ old_jwt = jwt_ops.validate_decode_jwt() # If token bad, should be handled & error message sent by jwt_required() and we won't get here if old_jwt: user_name = old_jwt['sub'] with engine.connect() as connection: s = text( """select active from pdp_users where username=:u """ ) s = s.bindparams(u=user_name) result = connection.execute(s) if result.rowcount: # Did we get a match on username? is_active = result.fetchone() else: log_user_action(user_name, "Failure", "Valid JWT presented for refesh attempt on unknown username") return jsonify("Bad credentials"), 401 if is_active[0].lower() == 'y': # In the user DB and still Active? 
token = jwt_ops.create_token(user_name,old_jwt['role']) log_user_action(user_name, "Success", "Refreshed token") return token else: return jsonify("Bad credentials"), 401 ### Unexpired *Admin* JWT required ############################ @user_api.route("/api/admin/user/create", methods=["POST"]) @jwt_ops.admin_required def user_create(): """Create user record Requires admin role Form POST JSON Parameters ---------- username : str full_name : str password : str role : str, one of `user`, `editor`, `admin` Returns ---------- User created: 201 + username Invalid role: 422 + "Bad role" Duplicate user: 409 + DB error """ try: post_dict = json.loads(request.data) new_user = post_dict["username"] fullname = post_dict["full_name"] userpw = post_dict["password"] user_role = post_dict["role"] except: return jsonify("Missing one or more parameters"), 400 requesting_user = jwt_ops.validate_decode_jwt()['sub'] pw_hash = hash_password(userpw) pu = Table("pdp_users", metadata, autoload=True, autoload_with=engine) pr = Table("pdp_user_roles", metadata, autoload=True, autoload_with=engine) with engine.connect() as connection: # Build dict of roles role_dict = {} r = select((pr.c.role, pr.c._id)) rr = connection.execute(r) fa = rr.fetchall() for row in fa: role_dict[row[0]] = row[1] # TODO: possible to do directly in sa? try: role_val = role_dict[user_role] except KeyError as e: logger.error("Role not found %s", e) log_user_action( requesting_user, "Failure", "Bad role (" + user_role + ") in user_create for " + new_user, ) return jsonify("Bad role"), 422 ins_stmt = pu.insert().values( # _id=default, username=new_user, password=pw_hash, full_name=fullname, active="Y", role=role_val, ) try: connection.execute(ins_stmt) except exc.IntegrityError as e: # Uniqueness violation return jsonify(e.orig.pgerror), 409 # if created, 201 log_user_action( requesting_user, "Success", "Created user " + new_user + " with role: " + user_role, ) return jsonify(new_user), 201 @user_api.route("/api/admin/user/get_user_count", methods=["GET"]) @jwt_ops.admin_required def get_user_count(): """Return number of records in pdp_users table """ with engine.connect() as connection: s = text("select count(user) from pdp_users;") result = connection.execute(s) user_count = result.fetchone() return jsonify(user_count[0]) @user_api.route("/api/admin/user/check_name", methods=["POST"]) @jwt_ops.admin_required def check_username(): """Return 1 if username exists already, else 0.""" try: post_dict = json.loads(request.data) test_username = post_dict["username"] except: return jsonify("Missing username"), 400 with engine.connect() as connection: s = text( """select count(username) from pdp_users where username=:u """ ) s = s.bindparams(u=test_username) result = connection.execute(s) if result.rowcount: # As we're doing a count() we *should* get a result user_exists = result.fetchone()[0] else: log_user_action(test_username, "Failure", "Error when checking username") return jsonify("Error checking username"), 500 return jsonify(user_exists) @user_api.route("/api/admin/user/update", methods=["POST"]) @jwt_ops.admin_required def user_update(): """Update existing user record """ try: post_dict = json.loads(request.data) username = post_dict["username"] except: return jsonify("Must specify username"), 400 update_dict = {} # Need to be a bit defensive here & select what we want instead of taking what we're given for key in ["full_name", "active", "role", "password"]: try: val = post_dict[key] update_dict[key] = val except: pass if not update_dict: 
logger.debug("Update called with nothing to update") return jsonify("No changed items specified") # If nothing to do, declare victory if "password" in update_dict.keys(): if password_is_strong(update_dict['password']): update_dict['password'] = hash_password(update_dict['password']) else: return jsonify("Password too weak") # We have a variable number of columns to update. # We could generate a text query on the fly, but this seems the perfect place to use the ORM # and let it handle the update for us. from sqlalchemy import update from sqlalchemy.orm import Session, sessionmaker Session = sessionmaker(engine) session = Session() # #TODO: Figure out why context manager doesn't work or do try/finally pr = Table("pdp_user_roles", metadata, autoload=True, autoload_with=engine) if ("role" in update_dict.keys()): # We are changing the role # Build dict of roles {name: id} role_dict = {} r = select((pr.c.role, pr.c._id)) rr = session.execute(r) fa = rr.fetchall() for row in fa: role_dict[row[0]] = row[1] logger.debug("Found %d roles", len(role_dict)) # Replace the role name with the corresponding id for update try: # We could verify that the role is actually different - doesn't seem worth the effort update_dict["role"] = role_dict[update_dict["role"]] except KeyError: logger.error("Attempted to change user '%s' to invalid role '%s'", username, update_dict["role"]) session.close() return jsonify("Invalid role specified"), 400 PU = Table("pdp_users", metadata, autoload=True, autoload_with=engine) stmt = update(PU).where(PU.columns.username == username).values(update_dict).\ execution_options(synchronize_session="fetch") result = session.execute(stmt) session.commit() session.close() return jsonify("Updated") @user_api.route("/api/admin/user/get_users", methods=["GET"]) @jwt_ops.admin_required def user_get_list(): """Return list of users""" with engine.connect() as connection: s = text( """ select username, full_name, active, pr.role from pdp_users as pu left join pdp_user_roles as pr on pu.role = pr._id order by username """ ) result = connection.execute(s) query_result_json = [dict(row) for row in result] return jsonify(query_result_json), 200 @user_api.route("/api/admin/user/get_info/<string:username>", methods=["GET"]) @jwt_ops.admin_required def user_get_info(username): """Return info on a specified user""" with engine.connect() as connection: s = text( """ select username, full_name, active, pr.role from pdp_users as pu left join pdp_user_roles as pr on pu.role = pr._id where username=:u """ ) s = s.bindparams(u=username) result = connection.execute(s) if result.rowcount: user_row = result.fetchone() else: log_user_action(username, "Failure", "Error when getting user info") return jsonify("Username not found"), 400 return jsonify( dict(zip(result.keys(), user_row)) ), 200 src/server/api/jwt_ops.py METASEP from functools import wraps from flask import Flask, jsonify, request, current_app from flask_jwt_extended import ( JWTManager, jwt_required, create_access_token, get_jwt_identity, verify_jwt_in_request, get_jwt ) from app import app, jwt # Wraps funcs to require admin role to execute def admin_required(fn): @wraps(fn) def wrapper(*args, **kwargs): verify_jwt_in_request() claims = get_jwt() if claims["role"] != "admin": return jsonify(msg="Admins only!"), 403 else: return fn(*args, **kwargs) return wrapper def create_token(username, accesslevel): """ Create a JWT *access* token for the specified user ('sub:') and role ('role:'). 
""" # Identity can be any data that is json serializable, we just use username addl_claims = {'role': accesslevel} new_token = create_access_token(identity=username, additional_claims=addl_claims) return jsonify(access_token=new_token) def validate_decode_jwt(): """ If valid, return jwt fields as a dictionary, else None """ jwtdict = None try: jwtdict = verify_jwt_in_request()[1] except: pass # Wasn't valid - either expired or failed validation return jwtdict src/server/api/internal_api.py METASEP from datetime import datetime import structlog from flask import jsonify from api.API_ingest import ingest_sources_from_api, salesforce_contacts from api.api import internal_api from rfm_funcs.create_scores import create_scores from api.API_ingest import updated_data logger = structlog.get_logger() ### Internal API endpoints can only be accessed from inside the cluster; ### they are blocked by location rule in NGINX config # Verify that this can only be accessed from within cluster @internal_api.route("/api/internal/test", methods=["GET"]) def user_test(): """ Liveness test, does not require JWT """ return jsonify(("OK from INTERNAL Test @ " + str(datetime.now()))) @internal_api.route("/api/internal/test/test", methods=["GET"]) def user_test2(): """ Liveness test, does not require JWT """ return jsonify(("OK from INTERNAL test/test @ " + str(datetime.now()))) @internal_api.route("/api/internal/ingestRawData", methods=["GET"]) def ingest_raw_data(): try: ingest_sources_from_api.start() except Exception as e: logger.error(e) return jsonify({'outcome': 'OK'}), 200 @internal_api.route("/api/internal/create_scores", methods=["GET"]) def hit_create_scores(): logger.info("Hitting create_scores() ") tuple_count = create_scores() logger.info("create_scores() processed %s scores", str(tuple_count) ) return jsonify(200) @internal_api.route("/api/internal/get_updated_data", methods=["GET"]) def get_contact_data(): logger.debug("Calling get_updated_contact_data()") contact_json = updated_data.get_updated_contact_data() logger.debug("Returning %d contact records", len(contact_json) ) return jsonify(contact_json), 200 src/server/api/file_uploader.py METASEP import pandas as pd from config import engine from donations_importer import validate_import_sfd from flask import current_app from models import ManualMatches, SalesForceContacts, ShelterluvPeople, Volgistics from shifts_importer import validate_import_vs from werkzeug.utils import secure_filename import structlog logger = structlog.get_logger() SUCCESS_MSG = "Uploaded Successfully!" def validate_and_arrange_upload(file): logger.info("Start uploading file: %s ", file.filename) filename = secure_filename(file.filename) file_extension = filename.rpartition(".")[2] with engine.begin() as conn: determine_upload_type(file, file_extension, conn) def determine_upload_type(file, file_extension, conn): # Yes, this method of discovering what kind of file we have by looking at # the extension and columns is silly. We'd like to get more of our data from # automatically pulling from vendor APIs directly, in which case we'd know # what kind of data we had. 
if file_extension == "csv": logger.debug("File extension is CSV") df = pd.read_csv(file, dtype="string") if {"salesforcecontacts", "volgistics", "shelterluvpeople"}.issubset(df.columns): logger.debug("File appears to be salesforcecontacts, volgistics, or shelterluvpeople (manual)") ManualMatches.insert_from_df(df, conn) return elif {"Animal_ids", "Internal-ID"}.issubset(df.columns): logger.debug("File appears to be shelterluvpeople") ShelterluvPeople.insert_from_df(df, conn) return if file_extension == "xlsx": excel_file = pd.ExcelFile(file) if {"Master", "Service"}.issubset(excel_file.sheet_names): logger.debug("File appears to be Volgistics") # Volgistics validate_import_vs(file, conn) Volgistics.insert_from_file(excel_file, conn) return df = pd.read_excel(excel_file) if "Contact ID 18" in df.columns: # Salesforce something-or-other if "Amount" in df.columns: # Salesforce donations logger.debug("File appears to be Salesforce donations") validate_import_sfd(file, conn) return else: # Salesforce contacts logger.debug("File appears to be Salesforce contacts") SalesForceContacts.insert_from_file_df(df, conn) return logger.error("Don't know how to process file: %s", file.filename) src/server/api/fake_data.py METASEP """ Fake data that can be returned when an API token is missing for local development, or for running pytest Shelterluv Data contains: Matched: Animal & Event End point """ shelterluv_data = { 'animals': { "animal_details": { '12345': { "Age": 24, "DOBUnixTime": 1568480456, "Name": "Lola aka Fake Cat", "Type": "Cat", "Photos": ["https://images.unsplash.com/photo-1456926631375-92c8ce872def?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8OHx8YW5pbWFsfGVufDB8fDB8fA%3D%3D&w=1000&q=80"], "Status": "Healthy In Home", }, }, "person_details": { "shelterluv_short_id": 2, }, }, 'events': { '12345':[ { 'AssociatedRecords': [ {'Id': 12345, 'Type': 'Animal' }, {'Id': 12345, 'Type': 'Person'}, ], 'Subtype': 'Foster Home', 'Time': '1602694822', 'Type': 'Outcome.Adoption', 'User': 'Fake User', }, ] }, } def sl_mock_data(end_point: str)-> dict: """ Shelterluv mock data. Takes the end_point as a str of `animals` or `events` and returns a dict representing a test data for that end_point. 
""" return shelterluv_data.get(end_point) src/server/api/common_api.py METASEP from api.api import common_api from config import engine from flask import jsonify , current_app from sqlalchemy.sql import text import requests import time from datetime import datetime import structlog logger = structlog.get_logger() from api.fake_data import sl_mock_data try: from secrets_dict import SHELTERLUV_SECRET_TOKEN except ImportError: # Not running locally logger.debug("Couldn't get SHELTERLUV_SECRET_TOKEN from file, trying environment **********") from os import getenv SHELTERLUV_SECRET_TOKEN = getenv('SHELTERLUV_SECRET_TOKEN') if not SHELTERLUV_SECRET_TOKEN: logger.warn("Couldn't get secrets from file or environment - defaulting to Fake Data") from api import jwt_ops @common_api.route('/api/timeout_test/<duration>', methods=['GET']) def get_timeout(duration): start = datetime.now().strftime("%H:%M:%S"); time.sleep(int(duration)) stop = datetime.now().strftime("%H:%M:%S"); results = jsonify({'result': 'success', 'duration': duration, 'start': start, 'stop': stop}) return results @common_api.route('/api/contacts/<search_text>', methods=['GET']) @jwt_ops.jwt_required() def get_contacts(search_text): with engine.connect() as connection: search_text = search_text.lower() names = search_text.split(" ") if len(names) == 2: query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color from pdp_contacts left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score where archived_date is null AND ( (lower(first_name) like lower(:name1) and lower(last_name) like lower(:name2)) OR (lower(first_name) like lower(:name2) and lower(last_name) like lower(:name1)) ) order by lower(last_name), lower(first_name)""") query_result = connection.execute(query, name1='{}%'.format(names[0]), name2='{}%'.format(names[1])) elif len(names) == 1: query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color from pdp_contacts left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score where archived_date is null AND ( lower(first_name) like lower(:search_text) OR lower(last_name) like lower(:search_text) ) order by lower(last_name), lower(first_name)""") query_result = connection.execute(query, search_text='{}%'.format(search_text)) query_result_json = [dict(row) for row in query_result] results = jsonify({'result': query_result_json}) return results @common_api.route('/api/rfm/<label>/<limit>', methods=['GET']) @common_api.route('/api/rfm/<label>', methods=['GET']) @jwt_ops.jwt_required() def get_rfm(label, limit=None): with engine.connect() as connection: query_string = """select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color from pdp_contacts left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score where archived_date is null AND rfm_label like :label and source_type = 'salesforcecontacts' order by lower(last_name), lower(first_name)""" if limit: query = text(query_string + " limit :limit") query_result = connection.execute(query, label='{}%'.format(label), limit=limit) else: query = text(query_string) query_result = connection.execute(query, label='{}%'.format(label)) query_result_json = [dict(row) for row in query_result] results = jsonify({'result': query_result_json}) return 
results @common_api.route('/api/rfm/labels', methods=['GET']) @jwt_ops.jwt_required() def get_rfm_labels(): with engine.connect() as connection: query = text("""select rfm_label, rfm_text_color, rfm_color, count(rfm_value) from rfm_scores left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score group by rfm_label, rfm_text_color, rfm_color;""") query_result = connection.execute(query) query_result_json = [dict(row) for row in query_result] results = jsonify({'result': query_result_json}) return results @common_api.route('/api/360/<matching_id>', methods=['GET']) @jwt_ops.jwt_required() def get_360(matching_id): result = {} with engine.connect() as connection: query = text("""select pdp_contacts.*, rfm_scores.rfm_score, rfm_label, rfm_color, rfm_text_color from pdp_contacts left join rfm_scores on rfm_scores.matching_id = pdp_contacts.matching_id left join rfm_mapping on rfm_mapping.rfm_value = rfm_scores.rfm_score where pdp_contacts.matching_id = :matching_id and archived_date is null""") query_result = connection.execute(query, matching_id=matching_id) result["contact_details"] = [dict(row) for row in query_result] for row in result["contact_details"]: if row["source_type"] == "salesforcecontacts": donations_query = text("""select cast (close_date as text), cast (amount as float), donation_type, primary_campaign_source from salesforcedonations where contact_id = :salesforcecontacts_id""") salesforce_contacts_query_result = connection.execute(donations_query, salesforcecontacts_id=row["source_id"]) salesforce_donations_results = [dict(row) for row in salesforce_contacts_query_result] if len(salesforce_donations_results): if not 'donations' in result: result['donations'] = salesforce_donations_results else: result['donations'].append(salesforce_donations_results) if row["source_type"] == "volgistics": # Shifts data shifts_query = text("""select volg_id, assignment, site, from_date, cast(hours as float) from volgisticsshifts where volg_id = :volgistics_id order by from_date desc limit 5""") volgistics_shifts_query_result = connection.execute(shifts_query, volgistics_id=row["source_id"]) volgisticsshifts_results = [] for r in volgistics_shifts_query_result: shifts = dict(r) volgisticsshifts_results.append(shifts) result['shifts'] = volgisticsshifts_results # Volunteer activity query_text = """ with activity as (select from_date, hours from volgisticsshifts where volg_id = :volgistics_id), alltime as (select min(from_date) as start_date, sum(hours) as life_hours from activity), ytd as (select sum(hours) as ytd_hours from activity where extract(year from from_date) = extract(year from current_date)) select cast(start_date as text), cast(life_hours as float), cast(ytd_hours as float) from alltime, ytd; """ hours_query = text(query_text) hours_query_result = connection.execute(hours_query, volgistics_id=row["source_id"]) result['activity'] = [dict(row) for row in hours_query_result] if row["source_type"] == "shelterluvpeople": shelterluv_id = row["source_id"] result["shelterluv_id"] = shelterluv_id return jsonify({'result': result}) @common_api.route('/api/person/<matching_id>/animals', methods=['GET']) @jwt_ops.jwt_required() def get_animals(matching_id): result = { "person_details": {}, "animal_details": {} } if not SHELTERLUV_SECRET_TOKEN: return jsonify(sl_mock_data('animals')) with engine.connect() as connection: query = text("select * from pdp_contacts where matching_id = :matching_id and source_type = 'shelterluvpeople' and archived_date is null") query_result = 
connection.execute(query, matching_id=matching_id) rows = [dict(row) for row in query_result] if len(rows) > 0: for row in rows: shelterluv_id = row["source_id"] person_url = f"http://shelterluv.com/api/v1/people/{shelterluv_id}" person_details = requests.get(person_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json() if "ID" in person_details: result["person_details"]["shelterluv_short_id"] = person_details["ID"] animal_ids = person_details["Animal_ids"] for animal_id in animal_ids: animal_url = f"http://shelterluv.com/api/v1/animals/{animal_id}" animal_details = requests.get(animal_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json() result["animal_details"][animal_id] = animal_details return result @common_api.route('/api/person/<matching_id>/animal/<animal_id>/events', methods=['GET']) @jwt_ops.jwt_required() def get_person_animal_events(matching_id, animal_id): result = {} events = [] if not SHELTERLUV_SECRET_TOKEN: return jsonify(sl_mock_data('events')) with engine.connect() as connection: query = text("select * from pdp_contacts where matching_id = :matching_id and source_type = 'shelterluvpeople' and archived_date is null") query_result = connection.execute(query, matching_id=matching_id) rows = [dict(row) for row in query_result] if len(rows) > 0: row = rows[0] shelterluv_id = row["source_id"] animal_url = f"http://shelterluv.com/api/v1/animals/{animal_id}/events" event_details = requests.get(animal_url, headers={"x-api-key": SHELTERLUV_SECRET_TOKEN}).json() for event in event_details["events"]: for record in event["AssociatedRecords"]: if record["Type"] == "Person" and record["Id"] == shelterluv_id: events.append(event) result[animal_id] = events return result @common_api.route('/api/person/<matching_id>/support', methods=['GET']) @jwt_ops.jwt_required() def get_support_oview(matching_id): """Return these values for the specified match_id: largest gift, date for first donation, total giving, number of gifts, amount of first gift, is recurring donor If consuming this, check number_of_gifts first. If 0, there's no more data available, so don't try to read any other fields - they may not exist. """ # One complication: a single match_id can map to multiple SF ids, so these queries need to # run on a list of of contact_ids. 
# First: get the list of salsforce contact_ids associated with the matching_id qcids = text("select source_id FROM pdp_contacts where matching_id = :matching_id and source_type = 'salesforcecontacts';") oview_fields = {} with engine.connect() as connection: query_result = connection.execute(qcids, matching_id=matching_id) rows = [dict(row) for row in query_result] id_list = [] if len(rows) > 0: for row in rows: if row['source_id'].isalnum(): id_list.append(row['source_id']) else: logger.warn("salesforcecontacts source_id %s has non-alphanumeric characters; will not be used", str(row['source_id'])) if len(id_list) == 0: # No ids to query oview_fields['number_of_gifts'] = 0 # Marker for no support data return jsonify(oview_fields) sov1 = text("""SELECT max(amount) as largest_gift, min(close_date) as first_donation_date, sum(amount) as total_giving, count(amount) as number_of_gifts FROM salesforcedonations as sfd WHERE contact_id IN :id_list ; """) sov1 = sov1.bindparams(id_list=tuple(id_list)) sov1_result = connection.execute(sov1) # query = query.bindparams(values=tuple(values # rows = [dict(row) for row in sov1_result] row = dict(sov1_result.fetchone()) if row['largest_gift'] : oview_fields['largest_gift'] = float(row['largest_gift']) else: oview_fields['largest_gift'] = 0.0 # oview_fields['largest_gift'] = float(rows[0]['largest_gift']) if row['first_donation_date']: oview_fields['first_donation_date'] = str(row['first_donation_date']) else: oview_fields['first_donation_date'] = '' if row['total_giving']: oview_fields['total_giving'] = float(row['total_giving']) else: oview_fields['total_giving'] = 0.0 oview_fields['number_of_gifts'] = row['number_of_gifts'] # These could be could combined them into a single complex query sov2 = text("""SELECT amount as first_gift_amount FROM salesforcedonations as sfd WHERE contact_id IN :id_list ORDER BY close_date asc limit 1 ; """) sov2 = sov2.bindparams(id_list=tuple(id_list)) sov2_result = connection.execute(sov2) if sov2_result.rowcount: fga = sov2_result.fetchone()[0] if fga: oview_fields['first_gift_amount'] = float(fga) else: oview_fields['first_gift_amount'] = 0.0 else: oview_fields['first_gift_amount'] = 0.0 sov3 = text("""SELECT recurring_donor as is_recurring FROM salesforcedonations as sfd WHERE contact_id IN :id_list ORDER BY close_date DESC LIMIT 1; """ ) sov3 = sov3.bindparams(id_list=tuple(id_list)) sov3_result = connection.execute(sov3) if sov3_result.rowcount: oview_fields['is_recurring'] = sov3_result.fetchone()[0] else: oview_fields['is_recurring'] = False rfm = text("""SELECT rfm_score, rfm_color, rfm_label, rfm_text_color FROM rfm_scores left join rfm_mapping on rfm_mapping.rfm_value = rfm_score WHERE matching_id = :match_id; """) rfm = rfm.bindparams(match_id = matching_id) rfm_result = connection.execute(rfm) if rfm_result.rowcount: row = rfm_result.fetchone() oview_fields['rfm_score'] = row[0] oview_fields['rfm_color'] = row[1] oview_fields['rfm_label'] = row[2] oview_fields['rfm_text_color'] = row[3] else: oview_fields['rfm_score'] = '' oview_fields['rfm_color'] = '' oview_fields['rfm_label'] = '' oview_fields['rfm_text_color'] = '' return jsonify(oview_fields) else: # len(rows) == 0 logger.warn('No SF contact IDs found for matching_id %', str(matching_id)) oview_fields['number_of_gifts'] = 0 # Marker for no data return jsonify(oview_fields) @common_api.route('/api/last_analysis', methods=['GET']) @jwt_ops.jwt_required() def get_last_analysis(): """ Return the UTC string (e.g., '2021-12-11T02:29:14.830371') representing when 
the last analysis run succesfully completed. Returns an empty string if no results. """ last_run = '' last_stamp = """ select update_stamp from execution_status where stage = 'flow' and status = 'complete' order by update_stamp desc limit 1; """ with engine.connect() as connection: result = connection.execute(last_stamp) if result.rowcount: row = result.fetchone() last_run_dt = row[0] # We get as a datetime object last_run = last_run_dt.isoformat() return last_run src/server/api/api.py METASEP from flask import Blueprint from flask_cors import CORS admin_api = Blueprint("admin_api", __name__) common_api = Blueprint("common_api", __name__) user_api = Blueprint("user_api", __name__) internal_api = Blueprint("internal_api", __name__) # TODO: SECURITY - CORS is wide open for development, needs to be limited for production CORS(user_api) CORS(common_api) CORS(admin_api) src/server/api/admin_api.py METASEP from api.api import admin_api import os import time from datetime import datetime import json from sqlalchemy.sql import text from sqlalchemy.dialects.postgresql import insert from sqlalchemy import Table, MetaData from pipeline import flow_script from config import engine from flask import request, redirect, jsonify from api.file_uploader import validate_and_arrange_upload from sqlalchemy.orm import sessionmaker from api import jwt_ops from config import RAW_DATA_PATH import structlog logger = structlog.get_logger() ALLOWED_EXTENSIONS = {"csv", "xlsx"} def __allowed_file(filename): return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS # file upload tutorial @admin_api.route("/api/file", methods=["POST"]) @jwt_ops.admin_required def upload_csv(): for file in request.files.getlist("file"): if __allowed_file(file.filename): try: validate_and_arrange_upload(file) except Exception as e: logger.exception(e) finally: file.close() return redirect(request.origin) @admin_api.route("/api/listCurrentFiles", methods=["GET"]) @jwt_ops.admin_required def list_current_files(): result = None logger.info("Start returning file list") file_list_result = os.listdir(RAW_DATA_PATH) if len(file_list_result) > 0: result = file_list_result return jsonify(result) @admin_api.route("/api/execute", methods=["POST"]) @jwt_ops.admin_required def execute(): logger.info("Execute flow") job_outcome = flow_script.start_flow() # 'busy', 'completed', or 'nothing to do' logger.info("Job outcome: %s", str(job_outcome)) # -------- Skip update if 'busy' or 'nothing to do' as nothing changed ? 
------ current_time = datetime.now().ctime() statistics = get_statistics() last_execution_details = {"executionTime": current_time, "stats": statistics} last_ex_json = (json.dumps(last_execution_details)) metadata = MetaData() kvt = Table("kv_unique", metadata, autoload=True, autoload_with=engine) # Write Last Execution stats to DB # See Alembic Revision ID: 05e0693f8cbb for table definition with engine.connect() as connection: ins_stmt = insert(kvt).values( # Postgres-specific insert() supporting ON CONFLICT keycol = 'last_execution_time', valcol = last_ex_json, ) # If key already present in DB, do update instead upsert = ins_stmt.on_conflict_do_update( constraint='kv_unique_keycol_key', set_=dict(valcol=last_ex_json) ) try: connection.execute(upsert) except Exception as e: logger.error("Insert/Update failed on Last Execution stats") logger.error(e) # ------------------------------------------------------------------------------- if job_outcome == 'busy': return jsonify({'outcome' : 'Already analyzing'}), 503 elif job_outcome == 'nothing to do': return jsonify({'outcome' : 'No uploaded files to process'}), 200 elif job_outcome == 'completed' : return jsonify({'outcome' : 'Analysis completed'}), 200 elif job_outcome == 'error' : return jsonify({'outcome' : 'Analysis not completed due to error'}), 500 else: return jsonify({'outcome' : 'Unknown status: ' + str(job_outcome)}), 200 def get_statistics(): with engine.connect() as connection: query_matches = text("SELECT count(*) FROM (SELECT distinct matching_id from pdp_contacts) as a;") query_total_count = text("SELECT count(*) FROM pdp_contacts;") matches_count_query_result = connection.execute(query_matches) total_count_query_result = connection.execute(query_total_count) # Need to iterate over the results proxy results = { "Distinct Matching Groups Count": [dict(row) for row in matches_count_query_result][0]["count"], "Total Contacts Count": [dict(row) for row in total_count_query_result][0]["count"] } return results @admin_api.route("/api/statistics", methods=["GET"]) @jwt_ops.admin_required def list_statistics(): """ Pull Last Execution stats from DB. """ logger.info("list_statistics() request") last_execution_details = '{}' # Empty but valid JSON engine.dispose() # we don't want other process's conn pool with engine.connect() as conn: try: # See Alembic Revision ID: 05e0693f8cbb for table definition s = text("select valcol from kv_unique where keycol = 'last_execution_time';") result = conn.execute(s) if result.rowcount > 0: last_execution_details = result.fetchone()[0] except Exception as e: logger.error("Failure reading Last Execution stats from DB - OK on first run") # Will happen on first run, shouldn't after return last_execution_details @admin_api.route("/api/get_execution_status", methods=["GET"]) @jwt_ops.admin_required def get_exec_status(): """ Get the execution status record from the DB for a running job, if present""" engine.dispose() # we don't want other process's conn pool with engine.connect() as connection: q = text("""SELECT job_id, stage, status, details, update_stamp FROM execution_status WHERE status = 'executing' """) result = connection.execute(q) if result.rowcount > 0: running_job = result.fetchone() return jsonify(dict(zip(result.keys(), running_job))) else: return jsonify('') @admin_api.route("/api/job_in_progress", methods=["GET"]) @jwt_ops.admin_required def is_job_in_progresss(): """Return True if there's a running execute, False if not. 
""" engine.dispose() # we don't want other process's conn pool with engine.connect() as connection: q = text("""SELECT job_id from execution_status WHERE status = 'executing' """) result = connection.execute(q) if result.rowcount > 0: return jsonify(True) else: return jsonify(False) def start_job(): """If no running jobs, create a job_id and execution status entry. This ensures only only one job runs at a time. If there's a running job, return None. """ engine.dispose() # we don't want other process's conn pool job_id = str(int(time.time())) q = text("""SELECT job_id from execution_status WHERE status = 'executing' """) i = text("""INSERT INTO execution_status (job_id, stage, status, details) values(:j, :stg, :stat, :det) """) i = i.bindparams(j = job_id, stg ='initiating', stat ='executing', det = '' ) running_job = None with engine.begin() as connection: # BEGIN TRANSACTION q_result = connection.execute(q) if q_result.rowcount == 0: # No running jobs ins_result = connection.execute(i) else: running_job = q_result.fetchone()[0] # COMMIT TRANSACTION #TODO: what would an exception look like here? if running_job : # There was a running job already logger.warn("Request to start job, but job_id %s already executing", str(running_job)) return None else: logger.info("Assigned job_id %s" , str(job_id ) ) return job_id def insert_rfm_scores(score_list): """Take a list of (matching_id, score) and insert into the rfm_scores table. """ # This takes about 4.5 sec to insert 80,000 rows Session = sessionmaker(engine) session = Session() metadata = MetaData() rfms = Table("rfm_scores", metadata, autoload=True, autoload_with=engine) truncate = "TRUNCATE table rfm_scores;" result = session.execute(truncate) ins_list = [] # Create a list of per-row dicts for pair in score_list: ins_list.append( {'matching_id' : pair[0], 'rfm_score' : pair[1]} ) ret = session.execute(rfms.insert(ins_list)) session.commit() # Commit all inserted rows session.close() return ret.rowcount # This is super-hacky - temporary @admin_api.route("/api/import_rfm", methods=["GET"]) def import_rfm_csv(): """ This imports the CSV files and calls the insert function""" import csv score_list = [] # Put your local file location \/ with open('C:\\Projects\\paws-stuff\\score_tuples.csv', 'r') as csvfile: reader = csv.reader(csvfile, delimiter=',') hdr = next(reader) logger.debug('Skipping header: %s', hdr) for row in reader: score_list.append(row) rc = insert_rfm_scores(score_list) return str(rc) + " rows inserted" def write_rfm_edges(rfm_dict : dict) : """Write the rfm edge dictionary to the DB""" if len(rfm_dict) == 3 : # R, F, *and* M! 
rfm_s = json.dumps(rfm_dict) # Convert dict to string metadata = MetaData() kvt = Table("kv_unique", metadata, autoload=True, autoload_with=engine) # See Alembic Revision ID: 05e0693f8cbb for table definition with engine.connect() as connection: ins_stmt = insert(kvt).values( # Postgres-specific insert() supporting ON CONFLICT keycol = 'rfm_edges', valcol = rfm_s, ) # If key already present in DB, do update instead upsert = ins_stmt.on_conflict_do_update( constraint='kv_unique_keycol_key', set_=dict(valcol=rfm_s) ) try: connection.execute(upsert) except Exception as e: logger.error("Insert/Update failed on rfm edge ") logger.error(e) return None return 0 else : # Malformed dict logger.error("Received rfm_edge dictionary with %s entries - expected 3", str(len(rfm_dict))) return None def read_rfm_edges() : """Read the rfm_edge record from the DB and return the dict.""" q = text("""SELECT valcol from kv_unique WHERE keycol = 'rfm_edges';""") with engine.begin() as connection: # BEGIN TRANSACTION q_result = connection.execute(q) if q_result.rowcount == 0: logger.error("No rfm_edge entry found in DB") return None else: edge_string = q_result.fetchone()[0] try: edge_dict = json.loads(edge_string) # Convert stored string to dict except json.decoder.JSONDecodeError: logger.error("rfm_edge entry found in DB was malformed") return None return edge_dict #@admin_api.route("/api/admin/test_pd", methods=["GET"]) # enable to trigger externally def pull_donations_for_rfm(): """Pull donations records for RFM scoring. Returns a list of (matching_id:int , amount:float, close_date:string (yyyy-mm-dd)) tuples""" q = text("""select matching_id, amount, close_date FROM pdp_contacts JOIN salesforcedonations as sfd on pdp_contacts.source_id = sfd.contact_id where pdp_contacts.source_type = 'salesforcecontacts' ORDER BY matching_id; """) sfd_list = [] with engine.connect() as connection: result = connection.execute(q) for row in result: sfd_list.append( (row[0], float(row[1]), str(row[2])) ) # return jsonify(sfd_list) # enable if using endpoint, but it returns a lot of data return sfd_list #@admin_api.route("/api/admin/test_pd", methods=["GET"]) # enable to trigger externally def generate_dummy_rfm_scores(): """For each matching_id, generate a random RFM score.""" from random import choice from functools import partial rc = partial( choice, range(1,6) ) q = text("""select distinct matching_id from pdp_contacts ORDER BY matching_id; """) dummy_scores = [] with engine.connect() as connection: result = connection.execute(q) for row in result: dummy_scores.append( ( row[0], str(rc()) + str(rc()) + str(rc()) ) ) # return jsonify(sfd_list) # enable if using endpoint, but it returns a lot of data logger.debug("Inserting dummy scores...") count = insert_rfm_scores(dummy_scores) logger.debug("Finished inserting") return count # ########### Test API endpoints # TODO: Remove for production # trigger rfm scoring process @admin_api.route("/api/admin/test_endpoint_gdrs", methods=["GET"]) def hit_gdrs(): num_scores = generate_dummy_rfm_scores() return jsonify({"scores added" : num_scores}) # trigger pull of SL animals @admin_api.route("/api/admin/test_sla", methods=["GET"]) def trigger_sla_pull(): import api.API_ingest.shelterluv_animals num_rows = api.API_ingest.shelterluv_animals.sla_test() return jsonify({"rows added" : num_rows}) # trigger pull of SL people @admin_api.route("/api/admin/test_slp", methods=["GET"]) def trigger_slp_pull(): import api.API_ingest.shelterluv_api_handler num_rows = 
api.API_ingest.shelterluv_api_handler.store_shelterluv_people_all() return jsonify({"rows added" : num_rows}) # trigger pull of SL animal events @admin_api.route("/api/admin/test_slae", methods=["GET"]) def trigger_slae_pull(): import api.API_ingest.sl_animal_events num_rows = api.API_ingest.sl_animal_events.slae_test() return jsonify({"rows added" : num_rows}) # def pdfr(): # dlist = pull_donations_for_rfm() # print("Returned " + str(len(dlist)) + " rows") # return jsonify( {'rows':len(dlist), 'row[0]': dlist[0]} ) # returns length and a sammple row # def validate_rfm_edges(): # d = read_rfm_edges() # read out of DB # print("d is: \n" + str(d) ) # write_rfm_edges(d) # Write it back # d = read_rfm_edges() # read it again # print("round-trip d is : \n " + str(d) ) # return "OK" src/server/api/__init__.py METASEP src/server/alembic/generate_rfm_mapping.py METASEP import itertools import structlog logger = structlog.get_logger() def get_all_combinations(chars): yield from itertools.product(*([chars] * 3)) def convertTuple(tup): str = '' for item in tup: str = str + item return str def start(): mapping_rows = [] mapping_rows.append( '''-- Run this script in your SQL query tool -- Run truncate command if this table is already populated -- TRUNCATE TABLE rfm_mapping; -- BEGIN; -- Fields are (rfm_score, label, (background) color, text color) ''' ) combinations = [] for x in get_all_combinations('12345'): combinations.append(convertTuple(x)) for rfm_score in combinations: label = '' background_color = '' color_text = '' r_m_average = (int(rfm_score[1]) + (int(rfm_score[2]))) / 2 r = int(rfm_score[0]) if r == 5 and (3 < r_m_average <= 5): label = 'High impact, engaged' background_color = '#034858' color_text = '#ffffff' elif r == 5 and (1 <= r_m_average <= 3): label = 'Low impact, engaged' background_color = '#47b8a7' color_text = '#000000' elif (3 <= r <= 4) and (3 < r_m_average <= 5): label = 'High impact, slipping' background_color = '#990000' color_text = '#ffffff' elif (3 <= r <= 4) and (1 <= r_m_average <= 3): label = 'Low impact, slipping' background_color = '#f77d4e' color_text = '#000000' elif (1 <= r <= 2) and (3 < r_m_average <= 5): label = 'High impact, disengaged' background_color = '#cf3030' color_text = '#ffffff' elif (1 <= r <= 2) and (1 <= r_m_average <= 3): label = 'Low impact, disengaged' background_color = '#eed0aa' color_text = '#000000' mapping_rows.append( "insert into rfm_mapping values('{}', '{}','{}', '{}');".format(rfm_score, label, background_color, color_text)) mapping_rows.append('-- COMMIT;') with open('populate_rfm_mapping.sql', 'w') as f: for item in mapping_rows: f.write("%s\n" % item) logger.debug('Completed generate_rfm_mapping') start() src/server/alembic/env.py METASEP from logging.config import fileConfig from sqlalchemy import engine_from_config from sqlalchemy import pool from os import environ from alembic import context # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. 
PG_URL1 = 'postgresql://postgres:' PG_URL2 = environ['POSTGRES_PASSWORD'] PG_URL3 = '@paws-compose-db/paws' PG_URL = PG_URL1 + PG_URL2 + PG_URL3 def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ # url = config.get_main_option("sqlalchemy.url") url = PG_URL context.configure( url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, url=PG_URL, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() src/server/wsgi.py METASEP from app import app if __name__ == "__main__": app.run() src/server/test_api.py METASEP import pytest, socket, requests, os import structlog logger = structlog.get_logger() try: from secrets_dict import BASEUSER_PW, BASEADMIN_PW except ImportError: BASEUSER_PW = os.environ['BASEUSER_PW'] BASEADMIN_PW = os.environ['BASEADMIN_PW'] jwt_token = '' # # Run 'pytest' from the command line (-v gives helpful details) # # Running pytest can result in six different exit codes: # 0 - All tests were collected and passed successfully # 1 - Tests were collected and run but some of the tests failed # 2 - Test execution was interrupted by the user # 3 - Internal error happened while executing tests # 4 - pytest command line usage error # 5 - No tests were collected # # These codes are represented by the pytest.ExitCode enum if os.getenv("IS_LOCAL") == "True": SERVER_URL = "http://localhost:3333" IS_LOCAL = True else: SERVER_URL = "http://server:5000" IS_LOCAL = False try: from secrets_dict import SHELTERLUV_SECRET_TOKEN except ImportError: SHELTERLUV_SECRET_TOKEN = os.getenv("SHELTERLUV_SECRET_TOKEN") finally: SL_Token = True if SHELTERLUV_SECRET_TOKEN else False ### DNS lookup tests ############################## def test_bad_dns(): """Verify DNS not resolving bad host names.""" with pytest.raises(socket.gaierror): socket.getaddrinfo("bad_server_name_that_should_not_resolve", "5000") @pytest.mark.skipif(IS_LOCAL, reason="Not run when IS_LOCAL") def test_db_dns(): """Verify we get IP for DB server.""" # getaddrinfo works for IPv4 and v6 try: gai = socket.getaddrinfo("db", "5000") except: pytest.fail('getaddrinfo() failed for db', pytrace=False) assert len(gai) > 0 @pytest.mark.skipif(IS_LOCAL, reason="Not run when IS_LOCAL") def test_server_dns(): """Verify we get IP for API server.""" try: gai = socket.getaddrinfo("server", "5000") except socket.gaierror: pytest.fail('getaddrinfo() failed for server', pytrace=False) assert len(gai) > 0 @pytest.mark.skipif(IS_LOCAL, reason="Not run when IS_LOCAL") def test_client_dns(): """Verify we get IP for client.""" try: gai = socket.getaddrinfo("client", "5000") except socket.gaierror: pytest.fail('getaddrinfo() failed for client', pytrace=False) assert len(gai) > 0 # Simple API tests 
################################################ def test_usertest(): """Verify liveness test works""" response = requests.get(SERVER_URL + "/api/user/test") assert response.status_code == 200 ######## Dependent tests ################################# # Store info across tests class State: def __init__(self): self.state = {} @pytest.fixture(scope='session') def state() -> State: state = State() state.state['from_fixture'] = 0 return state def test_userlogin(state: State): """Verify base_user can log in/get JWT.""" data = {"username":"base_user", "password" : BASEUSER_PW} response = requests.post(SERVER_URL + "/api/user/login", json=data) assert response.status_code == 200 try: jwt_token = response.json()['access_token'] except: pytest.fail('Did not get access token', pytrace=False) assert len(jwt_token) > 16 # Store the token for later use state.state['base_user'] = jwt_token def test_useraccess(state: State): """Verify logged-in base_user can use JWT to access test_auth""" # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/user/test_auth", headers=auth_hdr) assert response.status_code == 200 def test_user_bad_pw(): """Verify base_user with bad pw fails""" data = {"username":"base_user", "password" : 'some_bad_password'} response = requests.post(SERVER_URL + "/api/user/login", json=data) assert response.status_code == 401 def test_inact_userblocked(state: State): """Verify base_user_inact can't login because marked inactive.""" # Same pw as base_user data = {"username":"base_user_inact", "password" : BASEUSER_PW} response = requests.post(SERVER_URL + "/api/user/login", json=data) assert response.status_code == 401 ### Admin-level tests ###################################### def test_adminlogin(state: State): """Verify base_admin can log in/get JWT.""" data = {"username":"base_admin", "password" : BASEADMIN_PW} response = requests.post(SERVER_URL + "/api/user/login", json=data) assert response.status_code == 200 try: jwt_token = response.json()['access_token'] except: pytest.fail('Did not get access token', pytrace=False) assert len(jwt_token) > 16 # Store the token for later use state.state['base_admin'] = jwt_token def test_admingetusers(state: State): """Verify logged-in base_admin can use JWT to get user list """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/admin/user/get_users", headers=auth_hdr) assert response.status_code == 200 userlist = response.json() assert len(userlist) > 1 def test_check_usernames(state: State): """Verify logged-in base_admin can test usernames, gets correct result - existing user """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} data = {"username":"base_admin"} response = requests.post(SERVER_URL + "/api/admin/user/check_name", headers=auth_hdr, json=data) assert response.status_code == 200 is_user = response.json() assert is_user == 1 def test_check_badusernames(state: State): """Verify logged-in base_admin can test usernames, gets correct result - nonexistant user """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = 
{'Authorization' : b_string} data = {"username":"got_no_username_like_this"} response = requests.post(SERVER_URL + "/api/admin/user/check_name", headers=auth_hdr, json=data) assert response.status_code == 200 is_user = response.json() assert is_user == 0 def test_admin_currentFiles(state: State): """Verify admin user can get Current Files list""" b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/listCurrentFiles", headers=auth_hdr) assert response.status_code == 200 def test_admin_statistics(state: State): """360 view Statistics""" b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/statistics", headers=auth_hdr) assert response.status_code == 200 def test_usergetusers(state: State): """Verify logged-in base_user *cannot* use JWT to get user list """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/admin/user/get_users", headers=auth_hdr) assert response.status_code == 403 def test_currentFiles(state: State): """360 view Current Files list""" b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/listCurrentFiles", headers=auth_hdr) assert response.status_code == 200 def test_statistics(state: State): """360 view Statistics""" b_string = 'Bearer ' + state.state['base_admin'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} response = requests.get(SERVER_URL + "/api/statistics", headers=auth_hdr) assert response.status_code == 200 ### Shelterluv API tests ###################################### @pytest.mark.skipif(SL_Token, reason="Not run when SL_Token Present") def test_user_get_person_animal_events(state: State): """ Test that the api returns mock data if the Shelterluv Token is missing from secrets """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} url = SERVER_URL + "/api/person/12345/animal/12345/events" try: response = requests.get(url, headers = auth_hdr) except Exception as err: logger.error(err) else: assert response.status_code == 200 from api.fake_data import sl_mock_data assert response.json() == sl_mock_data("events") @pytest.mark.skipif(SL_Token, reason="Not run when SL_Token Present") def test_user_get_animals(state: State): """ Test that the api returns mock data if the Shelterluv Token is missing from secrets """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} url = SERVER_URL + "/api/person/12345/animals" try: response = requests.get(url, headers = auth_hdr) except Exception as err: logger.error(err) else: assert response.status_code == 200 from api.fake_data import sl_mock_data assert response.json() == sl_mock_data("animals") @pytest.mark.skipif(not SL_Token, reason="Run when SL_Token Present") def test_user_get_animals_sl_token(state: State): """ Test to confirm api does not return mock values if the Shelterluv Token is present in the secrets_dict file. 
Note this works on the assumption the SL token is not valid, and returns a default empty value >> This is tricky - if SL token is correct and person_id is valid, could get animal records returned. """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} url = SERVER_URL + "/api/person/12345/animals" try: response = requests.get(url, headers = auth_hdr) except Exception as err: logger.error(err) pytest.fail('test_user_get_animals_sl_token - Request failed', pytrace=False) else: assert response.status_code == 200 assert response.json() == {'person_details': {}, 'animal_details': {}} @pytest.mark.skipif(not SL_Token, reason="Run when SL_Token Present") def test_user_get_person_animal_events_sl_token(state: State): """ Test to confirm api does not return mock values if the Shelterluv Token is present in the secrets_dict file. Note this works on the assumption the SL token is not valid, and returns a default empty value """ # Build auth string value including token from state b_string = 'Bearer ' + state.state['base_user'] assert len(b_string) > 24 auth_hdr = {'Authorization' : b_string} url = SERVER_URL + "/api/person/12345/animal/12345/events" try: response = requests.get(url, headers = auth_hdr) except Exception as err: logger.error(err) pytest.fail('test_user_get_person_animal_events_sl_token - Request failed', pytrace=False) else: assert response.status_code == 200 assert response.json() == {} src/server/shifts_importer.py METASEP import re from flask.globals import current_app from openpyxl import load_workbook from jellyfish import jaro_similarity from config import engine import structlog logger = structlog.get_logger() from sqlalchemy import insert, Table, Column, MetaData, exc from sqlalchemy.dialects.postgresql import Insert metadata = MetaData() MINIMUM_SIMILARITY = 0.85 # How good does the table match need to be? expected_columns = { 'Number' : 'volg_id', 'Site' : 'site', 'Place' : None, 'Assignment' : 'assignment', 'From date' : 'from_date', 'To date' : None, 'From time' :None, 'To time' : None, 'Hours' : 'hours', 'No Call/No Show' : None, 'Call/Email to miss shift' : None, 'Absence' : None, 'Volunteers' : None } def validate_import_vs(filename, conn): """ Validate that the XLSX column names int the file are close enough to expectations that we can trust the data. If so, insert the data into the volgisticsshifts table. """ logger.info('------ Loading %s ', filename.filename ) wb = load_workbook(filename) # ,read_only=True should be faster but gets size incorrect ws = wb['Service'] # Needs to be 'Service' sheet # ws.reset_dimensions() # Tells openpyxl to ignore what sheet says and check for itself ws.calculate_dimension() columns = ws.max_column if columns > 26: # TODO: Handle AA, AB, usw... 
logger.warn("Column count > 26; columns after Z not processed") columns = 26 header = [cell.value for cell in ws[1]] min_similarity = 1.0 min_column = None for expected, got in zip(expected_columns.keys(), header): jsim = jaro_similarity(expected, got) if jsim < min_similarity : min_similarity = jsim min_column = expected + ' / ' + got logger.debug("Minimum similarity: %s", "{:.2}".format(min_similarity) ) if min_column: logger.debug("On expected/got: %s", str(min_column)) if min_similarity >= MINIMUM_SIMILARITY : # Good enough to trust vs = Table("volgisticsshifts", metadata, autoload=True, autoload_with=engine) seen_header = False # Used to skip header row # Stats for import dupes = 0 other_integrity = 0 other_exceptions = 0 row_count = 0 missing_volgistics_id = 0 for row in ws.values: if seen_header: row_count += 1 if row_count % 1000 == 0: logger.debug("Row: %s", str(row_count) ) zrow = dict(zip(expected_columns.values(), row)) # zrow is a dict of db_col:value pairs, with at most one key being None (as it overwrote any previous) # We need to remove the None item, if it exists try: del zrow[None] except KeyError: pass # Cleanup time! Many older imports have... peculiarities # End cleanup if zrow['volg_id'] : # No point in importing if there's nothing to match # Finally ready to insert row into the table # stmt = Insert(vs).values(zrow) skip_dupes = stmt.on_conflict_do_nothing( constraint='uq_shift' ) try: result = conn.execute(skip_dupes) except exc.IntegrityError as e: # Catch-all for several more specific exceptions if re.match('duplicate key value', str(e.orig) ): dupes += 1 pass else: other_integrity += 1 logger.error(e) except Exception as e: other_exceptions += 1 logger.error(e) else: # Missing contact_id missing_volgistics_id += 1 else: # Haven't seen header, so this was first row. seen_header = True # NOTE: we now run this in a engine.begin() context manager, so our # parent will commit. Don't commit here! logger.info("Total rows: %s Dupes: %s Missing volgistics id: %s", str(row_count), str(dupes), str(missing_volgistics_id) ) logger.info("Other integrity exceptions: %s Other exceptions: %s", str(other_exceptions), str(other_integrity) ) wb.close() return { True : "File imported" } src/server/secrets_dict.py METASEP SD_COMMENT="This is for local development" APP_SECRET_KEY="ASKASK" JWT_SECRET="JWTSECRET" POSTGRES_PASSWORD="thispasswordisverysecure" BASEUSER_PW="basepw" BASEEDITOR_PW="editorpw" BASEADMIN_PW="basepw" DROPBOX_APP="DBAPPPW" src/server/models.py METASEP import datetime import re from itertools import combinations import pandas as pd import sqlalchemy as sa from sqlalchemy import ( Boolean, DateTime, Index, Integer, String, delete, desc, func, literal_column, select, text, tuple_, ) from sqlalchemy.dialects.postgresql import JSONB, insert from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.sql.functions import coalesce Base = declarative_base() def Column(*colargs, source_column=None, contacts_column=None, **kwargs): # Currently many of our database tables are populated by loading some csv or # excel table and inserting those columns with little more than a column # name change. Many of those databse columns will also later be copied into # pdp_contacts with little processing. This drop-in replacement for # sqlalchemy's Column function lets us easily provide this extra information # for some automated processing. 
info = kwargs.get("info", {}) if source_column: info["source_column"] = source_column if contacts_column: info["contacts_column"] = contacts_column return sa.Column(*colargs, info=info, **kwargs) def get_source_column_translation(cls): # Produce a mapping of source column to database column for a class that # uses the Column helper return { col.info["source_column"]: col.name for col in cls.__table__.columns if "source_column" in col.info } def get_contacts_mapping(cls): # Produce an association of pdp_contacts columns with some other table's # columns for use in an INSERT ... FROM SELECT return [ (PdpContacts.matching_id, 0), (PdpContacts.source_type, literal_column(f"'{cls.__tablename__}'")), ] + [ (col.info["contacts_column"], col) for col in cls.__table__.columns if "contacts_column" in col.info ] def dedup_consecutive(table, unique_id, id, order_by, dedup_on): # Many of our raw data tables have a similar structure: a contact id column, # an insert time column, and several other pieces of raw data. If someone # inserts a "new" record for a certain id, but none of the raw data is # different from the previous record, we'd like to get rid of it. # # This bit of SQL magic partitions a table by a given id column, orders it # by some order_by column, and removes duplicate consecutive entries based # on some dedup_on expression. # # Note the use of "IS NOT DISTINCT FROM" instead of "!="; the latter does # not work well on null values. sq = select( unique_id, id, order_by, dedup_on.bool_op("IS NOT DISTINCT FROM")( func.lag(dedup_on).over(partition_by=id, order_by=order_by) ).label("is_dupe"), ).subquery() to_delete = select(sq.c[0]).where(sq.c[3]).subquery() return delete(table).where(unique_id == to_delete.c[0]) def normalize_phone_number(number): result = None if number and str(number) != "nan": number = re.sub("[() -.+]", "", str(number)) if number and number[0] == "1": number = number[1:] if number.isdigit() and len(number) == 10: result = number return result class PdpContacts(Base): __tablename__ = "pdp_contacts" __table_args__ = ( Index("idx_pdp_contacts_lower_first_name", text("lower(first_name)")), Index("idx_pdp_contacts_lower_last_name", text("lower(last_name)")), Index("idx_pdp_contacts_lower_email", text("lower(email)")), Index("idx_pdp_contacts_source_type_and_id", "source_type", "source_id"), ) _id = Column(Integer, primary_key=True, autoincrement=True) matching_id = Column(Integer) source_type = Column(String) source_id = Column(String) is_organization = Column(Boolean, default=False) first_name = Column(String, default=None) last_name = Column(String, default=None) email = Column(String, default=None) mobile = Column(String, default=None) street_and_number = Column(String, default=None) apartment = Column(String) city = Column(String, default=None) state = Column(String, default=None) zip = Column(String, default=None) json = Column(JSONB) created_date = Column(DateTime, default=datetime.datetime.utcnow) archived_date = Column(DateTime, default=None) P = PdpContacts class SalesForceContacts(Base): __tablename__ = "salesforcecontacts" _id = Column(Integer, primary_key=True) contact_id = Column( String, source_column="Contact ID 18", contacts_column=P.source_id ) first_name = Column( String, source_column="First Name", contacts_column=P.first_name ) last_name = Column(String, source_column="Last Name", contacts_column=P.last_name) account_name = Column(String, source_column="Account Name") mailing_country = Column(String, source_column="Mailing Country") mailing_street = 
Column( String, source_column="Mailing Street", contacts_column=P.street_and_number ) mailing_city = Column(String, source_column="Mailing City", contacts_column=P.city) mailing_state_province = Column( String, source_column="Mailing State/Province", contacts_column=P.state ) mailing_zip_postal_code = Column( String, source_column="Mailing Zip/Postal Code", contacts_column=P.zip ) phone = Column(String, source_column="Phone") mobile = Column(String, source_column="Mobile") email = Column(String, source_column="Email", contacts_column=P.email) json = Column(JSONB) created_date = Column(DateTime, default=datetime.datetime.utcnow) @classmethod def insert_from_file_df(cls, df, conn): column_translation = get_source_column_translation(cls) df = df[column_translation.keys()] df = df.rename(columns=column_translation) df["phone"] = df["phone"].apply(normalize_phone_number) df["mobile"] = df["mobile"].apply(normalize_phone_number) dedup_on = [col for col in cls.__table__.columns if col.name in df.columns] df["created_date"] = datetime.datetime.utcnow() df.to_sql(cls.__tablename__, conn, if_exists="append", index=False) conn.execute( dedup_consecutive( cls.__table__, unique_id=cls._id, id=cls.contact_id, order_by=cls.created_date, dedup_on=tuple_(*dedup_on), ) ) @classmethod def insert_into_pdp_contacts(cls): column_mapping = get_contacts_mapping(cls) + [ # Note: current version of SQLalchemy doesn't like seeing the same # column object twice in insert().from_select, hence this # literal_column. I think this is fixed in a later version? (P.apartment, literal_column("mailing_street")), (P.mobile, coalesce(cls.mobile, cls.phone)), (P.is_organization, cls.account_name.not_like("% Household")), ] contacts_columns, this_columns = zip(*column_mapping) return insert(PdpContacts).from_select( list(contacts_columns), select(*this_columns) .distinct(cls.contact_id) .order_by(cls.contact_id, desc(cls.created_date)), ) class ShelterluvPeople(Base): __tablename__ = "shelterluvpeople" _id = Column(Integer, primary_key=True) firstname = Column(String, source_column="Firstname", contacts_column=P.first_name) lastname = Column(String, source_column="Lastname", contacts_column=P.last_name) id = Column(String, source_column="ID") internal_id = Column( String, source_column="Internal-ID", contacts_column=P.source_id ) associated = Column(String, source_column="Associated") street = Column(String, source_column="Street", contacts_column=P.street_and_number) apartment = Column(String, source_column="Apartment", contacts_column=P.apartment) city = Column(String, source_column="City", contacts_column=P.city) state = Column(String, source_column="State", contacts_column=P.state) zip = Column(String, source_column="Zip", contacts_column=P.zip) email = Column(String, source_column="Email", contacts_column=P.email) phone = Column(String, source_column="Phone", contacts_column=P.mobile) animal_ids = Column(JSONB, source_column="Animal_ids") json = Column(JSONB) created_date = Column(DateTime, default=datetime.datetime.utcnow) @classmethod def insert_from_df(cls, df, conn): column_translation = get_source_column_translation(cls) df = df[column_translation.keys()] df = df.rename(columns=column_translation) df["phone"] = df["phone"].apply(normalize_phone_number) dedup_on = [col for col in cls.__table__.columns if col.name in df.columns] df["created_date"] = datetime.datetime.utcnow() df.to_sql( cls.__tablename__, conn, dtype={"animal_ids": JSONB}, if_exists="append", index=False, ) conn.execute( dedup_consecutive( cls.__table__, 
unique_id=cls._id, id=cls.internal_id, order_by=cls.created_date, dedup_on=tuple_(*dedup_on), ) ) @classmethod def insert_into_pdp_contacts(cls): column_mapping = get_contacts_mapping(cls) contacts_columns, this_columns = zip(*column_mapping) return insert(PdpContacts).from_select( list(contacts_columns), select(*this_columns) .distinct(cls.internal_id) .order_by(cls.internal_id, desc(cls.created_date)), ) class Volgistics(Base): __tablename__ = "volgistics" _id = Column(Integer, primary_key=True) number = Column(String, source_column="Number", contacts_column=P.source_id) last_name = Column(String, source_column="Last name", contacts_column=P.last_name) first_name = Column( String, source_column="First name", contacts_column=P.first_name ) middle_name = Column(String, source_column="Middle name") complete_address = Column(String, source_column="Complete address") street_1 = Column(String, source_column="Street 1") street_2 = Column(String, source_column="Street 2") street_3 = Column(String, source_column="Street 3") city = Column(String, source_column="City", contacts_column=P.city) state = Column(String, source_column="State", contacts_column=P.state) zip = Column(String, source_column="Zip", contacts_column=P.zip) all_phone_numbers = Column(String, source_column="All phone numbers") home = Column(String, source_column="Home") work = Column(String, source_column="Work") cell = Column(String, source_column="Cell") email = Column(String, source_column="Email", contacts_column=P.email) json = Column(JSONB) created_date = Column(DateTime, default=datetime.datetime.utcnow) @classmethod def insert_from_file(cls, xl_file, conn): df = pd.read_excel(xl_file, sheet_name="Master") column_translation = get_source_column_translation(cls) df = df[column_translation.keys()] df = df.rename(columns=column_translation) df["home"] = df["home"].apply(normalize_phone_number) df["work"] = df["work"].apply(normalize_phone_number) df["cell"] = df["cell"].apply(normalize_phone_number) dedup_on = [col for col in cls.__table__.columns if col.name in df.columns] df["created_date"] = datetime.datetime.utcnow() df.to_sql( cls.__tablename__, conn, if_exists="append", index=False, ) conn.execute( dedup_consecutive( cls.__table__, unique_id=cls._id, id=cls.number, order_by=cls.created_date, dedup_on=tuple_(*dedup_on), ) ) @classmethod def insert_into_pdp_contacts(cls): column_mapping = get_contacts_mapping(cls) + [ # NOTE: This logic seems wrong. It peels off the street number and # calls it the "apartment," and calls the rest of the address the # "street and number." ( P.street_and_number, literal_column("regexp_replace(street_1, '^[^ ]* ?', '')"), ), (P.apartment, literal_column("(regexp_match(street_1, '^([^ ]*) '))[1]")), (P.mobile, coalesce(cls.cell, cls.home)), ] contacts_columns, this_columns = zip(*column_mapping) return insert(PdpContacts).from_select( list(contacts_columns), select(*this_columns) .distinct(cls.number) .order_by(cls.number, desc(cls.created_date)), ) class ManualMatches(Base): __tablename__ = "manual_matches" source_type_1 = Column(String, primary_key=True) source_id_1 = Column(String, primary_key=True) source_type_2 = Column(String, primary_key=True) source_id_2 = Column(String, primary_key=True) @classmethod def insert_from_df(cls, df, conn): # Our input csv has columns like "salesforcecontacts," "volgistics," and # "shelterluvpeople," where two columns are non-null if there is an # association between those two ids. We massage this table into one that # is easier to join on. 
match_dicts = df.to_dict(orient="records") matched_pairs = [] for match in match_dicts: non_nulls = {k: v for (k, v) in match.items() if not pd.isna(v)} for ((st1, sid1), (st2, sid2)) in combinations(non_nulls.items(), 2): matched_pairs.append( { "source_type_1": st1, "source_id_1": sid1, "source_type_2": st2, "source_id_2": sid2, } ) conn.execute(insert(cls).values(matched_pairs).on_conflict_do_nothing()) class SalesforceDonations(Base): __tablename__ = "salesforcedonations" _id = sa.Column(sa.Integer, primary_key=True) opp_id = sa.Column(sa.String) recurring_donor = sa.Column(sa.Boolean) primary_contact = sa.Column(sa.String) contact_id = sa.Column(sa.String) amount = sa.Column(sa.Numeric) close_date = sa.Column(sa.Date) donation_type = sa.Column(sa.String) primary_campaign_source = sa.Column(sa.String) src/server/donations_importer.py METASEP import re from flask.globals import current_app from openpyxl import load_workbook from jellyfish import jaro_similarity from config import engine import structlog logger = structlog.get_logger() from sqlalchemy import insert, Table, Column, MetaData, exc from sqlalchemy.dialects.postgresql import Insert metadata = MetaData() MINIMUM_SIMILARITY = 0.85 # How good does the table match need to be? expected_columns = {'Recurring donor' : 'recurring_donor', # 'Export XLSX file column name' : 'db column name' 'Opportunity Owner': None , # None means we won't import that column into DB 'Account ID 18': None , 'Account Name': None, 'Primary Contact': 'primary_contact', 'Contact ID 18': 'contact_id', 'Opportunity ID 18': 'opp_id', # Should be a unique donation ID but isn't quite 'Opportunity Name': None, 'Stage': None, 'Fiscal Period': None, 'Amount': 'amount', 'Probability (%)': None, 'Age': None, 'Close Date': 'close_date', 'Created Date': None, 'Type': 'donation_type', 'Primary Campaign Source': 'primary_campaign_source' , 'Source': None } def validate_import_sfd(filename, conn): """ Validate that the XLSX column names int the file are close enough to expectations that we can trust the data. If so, insert the data into the salseforcedonations table. """ logger.info('---------------------- Loading %s -------------------', filename.filename) wb = load_workbook(filename) # ,read_only=True should be faster but gets size incorrect ws = wb.active # ws.reset_dimensions() # Tells openpyxl to ignore what sheet says and check for itself ws.calculate_dimension() columns = ws.max_column if columns > 26: # TODO: Handle AA, AB, usw... 
logger.warning("Column count > 26; columns after Z not processed") columns = 26 header = [cell.value for cell in ws[1]] min_similarity = 1.0 min_column = None for expected, got in zip(expected_columns.keys(), header): jsim = jaro_similarity(expected, got) if jsim < min_similarity : min_similarity = jsim min_column = expected + ' / ' + got logger.debug("Minimum similarity: %s", "{:.2}".format(min_similarity) ) if min_column: logger.debug("On expected/got: %s", str(min_column)) if min_similarity >= MINIMUM_SIMILARITY : # Good enough to trust sfd = Table("salesforcedonations", metadata, autoload=True, autoload_with=engine) seen_header = False # Used to skip header row # Stats for import dupes = 0 other_integrity = 0 other_exceptions = 0 row_count = 0 missing_contact_id = 0 for row in ws.values: if seen_header: row_count += 1 if row_count % 1000 == 0: logger.debug("Row: %s", str(row_count) ) zrow = dict(zip(expected_columns.values(), row)) # zrow is a dict of db_col:value pairs, with at most one key being None (as it overwrote any previous) # We need to remove the None item, if it exists try: del zrow[None] except KeyError: pass # Cleanup time! Many older imports have... peculiarities if zrow['amount'] == None: # We get some with no value, probably user error zrow['amount'] = 0.0 # Setting bad amounts to 0 as per KF if zrow['recurring_donor'] == '=FALSE()' : zrow['recurring_donor'] = False if zrow['recurring_donor'] == '=TRUE()' : zrow['recurring_donor'] = True # End cleanup if zrow['contact_id'] : # No point in importing if there's nothing to match # Finally ready to insert row into the table # stmt = Insert(sfd).values(zrow) skip_dupes = stmt.on_conflict_do_nothing( constraint='uq_donation' ) try: result = conn.execute(skip_dupes) except exc.IntegrityError as e: # Catch-all for several more specific exceptions if re.match('duplicate key value', str(e.orig) ): dupes += 1 pass else: other_integrity += 1 logger.error(e) except Exception as e: other_exceptions += 1 logger.error(e) else: # Missing contact_id missing_contact_id += 1 else: # Haven't seen header, so this was first row. seen_header = True # NOTE: we now run this in a engine.begin() context manager, so our # parent will commit. Don't commit here! 
logger.debug("Stats: Total rows: %s Dupes: %s Missing contact_id: %s", str(row_count) , str(dupes), str(missing_contact_id) ) logger.debug("Other integrity exceptions: %s Other exceptions: %s", str(other_integrity), str(other_exceptions) ) wb.close() return { True : "File imported" } else: # Similarity too low wb.close() logger.warn("Similarity value of %s is below threshold of %s so file was not processed ", '{:.2}'.format(min_similarity), str(MINIMUM_SIMILARITY)) return {False : "Similarity to expected column names below threshold"} src/server/constants.py METASEP import os # Determine if app is ran from docker or local by testing the env var "IS_LOCAL" IS_LOCAL = os.getenv("IS_LOCAL") BASE_PATH = "../local_files/" if IS_LOCAL == "True" else "/app/static/" # Initiate local file system RAW_DATA_PATH = BASE_PATH + "raw_data/" OUTPUT_PATH = BASE_PATH + "output/" LOGS_PATH = BASE_PATH + "logs/" REPORT_PATH = OUTPUT_PATH + "reports/" ZIPPED_FILES = BASE_PATH + "zipped/" src/server/config.py METASEP import os import sys import sqlalchemy as db import models from constants import IS_LOCAL, BASE_PATH, RAW_DATA_PATH, OUTPUT_PATH, LOGS_PATH, REPORT_PATH, ZIPPED_FILES import logging import structlog from structlog.processors import CallsiteParameter # structlog setup for complete app # Formatters shared_processors=[ structlog.contextvars.merge_contextvars, structlog.processors.add_log_level, structlog.processors.StackInfoRenderer(), structlog.dev.set_exc_info, structlog.processors.TimeStamper(fmt="iso", utc=True ), structlog.processors.CallsiteParameterAdder( [ CallsiteParameter.FILENAME, CallsiteParameter.FUNC_NAME, CallsiteParameter.LINENO, ]) ] # Select output processor depending if running locally/interactively or not if sys.stderr.isatty(): # Pretty-print processors = shared_processors + [structlog.dev.ConsoleRenderer(), ] else: # Emit structured/JSON processors = shared_processors + [ structlog.processors.dict_tracebacks, structlog.processors.JSONRenderer(), ] structlog.configure( processors=processors, wrapper_class=structlog.make_filtering_bound_logger(logging.NOTSET), context_class=dict, logger_factory=structlog.PrintLoggerFactory(), cache_logger_on_first_use=False ) logger = structlog.get_logger() # Initiate postgres DB # best practices is to have only one engine per application process # https://docs.sqlalchemy.org/en/13/core/connections.html POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD", "thispasswordisverysecure") POSTGRES_DATABASE = os.getenv("POSTGRES_DATABASE", "paws") POSTGRES_USER = os.getenv("POSTGRES_USER", "postgres") if IS_LOCAL == "True": DB = os.getenv( "LOCAL_DB_IP", "postgresql://postgres:" + POSTGRES_PASSWORD + "@localhost:5432/" + POSTGRES_DATABASE, ) else: DB = ( "postgresql://" + POSTGRES_USER + ":" + POSTGRES_PASSWORD + "@paws-compose-db/" + POSTGRES_DATABASE ) engine = db.create_engine(DB) # Run Alembic to create managed tables # from alembic.config import Config # from alembic import command # alembic_cfg = Config("alembic.ini") # command.stamp(alembic_cfg, "head") # logger.warn("Testing") with engine.connect() as connection: import db_setup.base_users db_setup.base_users.create_base_roles() # IFF there are no roles already db_setup.base_users.create_base_users() # IFF there are no users already db_setup.base_users.populate_sl_event_types() # IFF there are no event types already db_setup.base_users.populate_rfm_mapping_table() # Set to True to force loading latest version of populate script # found in the server/alembic directory # Create these directories only 
# one time - when initializing
if not os.path.isdir(BASE_PATH):
    os.makedirs(BASE_PATH, exist_ok=True)
    os.makedirs(RAW_DATA_PATH, exist_ok=True)
    os.makedirs(OUTPUT_PATH, exist_ok=True)
    os.makedirs(LOGS_PATH, exist_ok=True)
    os.makedirs(REPORT_PATH, exist_ok=True)
    os.makedirs(ZIPPED_FILES, exist_ok=True)

src/server/app.py METASEP
import os

import structlog
logger = structlog.get_logger()

from flask import Flask
from flask_jwt_extended import JWTManager

try:
    from secrets_dict import JWT_SECRET, APP_SECRET_KEY
except ImportError:
    # Not running locally
    logger.info("Could not get secrets from file, trying environment **********")
    from os import environ
    try:
        JWT_SECRET = environ['JWT_SECRET']
        APP_SECRET_KEY = environ['APP_SECRET_KEY']
    except KeyError:
        # Nor in environment
        # You're SOL for now
        logger.critical("Couldn't get secrets from file or environment")

# logger = structlog.get_logger()

app = Flask(__name__)

app.config["JWT_SECRET_KEY"] = JWT_SECRET
app.config["JWT_MAX_TIMEOUT"] = 30*60  # Seconds  # We'll use max for default but can be reduced for testing
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = app.config["JWT_MAX_TIMEOUT"]
jwt = JWTManager(app)

app.secret_key = APP_SECRET_KEY
app.config["MAX_CONTENT_LENGTH"] = 500 * 1024 * 1024  # 500 Megs
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0

from api.admin_api import admin_api
from api.common_api import common_api
from api.user_api import user_api
from api.internal_api import internal_api

app.register_blueprint(admin_api)
app.register_blueprint(common_api)
app.register_blueprint(user_api)
app.register_blueprint(internal_api)

# init_db_schema.start(connection)

# Emit a log entry at each level
logger.debug("Log sample - debug")
logger.info("Log sample - info")
logger.warn("Log sample - warn")
logger.error("Log sample - error")
logger.critical("Log sample - critical")

if __name__ == "__main__":
    FLASK_PORT = os.getenv("FLASK_PORT", None)
    # create_app()
    app.run(host="0.0.0.0", debug=True, port=FLASK_PORT)

src/server/pub_sub/stubs/pubsub_api_pb2_grpc.py METASEP
[ { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. 
Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. 
This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. A client needs to call the Subscribe RPC again to restart the subscription\n at a new point in the stream.\n\n The first FetchRequest of the stream identifies the topic to subscribe to.\n If any subsequent FetchRequest provides topic_name, it must match what\n was provided in the first FetchRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSchema(self, request, context):\n \"\"\"Get the event schema for a topic based on a schema ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTopic(self, request, context):\n \"\"\"\n Get the topic Information related to the specified topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Publish(self, request, context):\n \"\"\"\n Send a publish request to synchronously publish events to a topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishStream(self, request_iterator, context):\n \"\"\"\n Bidirectional Streaming RPC to publish events to the event bus.\n PublishRequest contains the batch of events to publish.\n\n The first PublishRequest of the stream identifies the topic to publish on.\n If any subsequent PublishRequest provides topic_name, it must match what\n was provided in the first PublishRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n\n The server returns a PublishResponse for each PublishRequest when publish is\n complete for the batch. A client does not have to wait for a PublishResponse\n before sending a new PublishRequest, i.e. multiple publish batches can be queued\n up, which allows for higher publish rate as a client can asynchronously\n publish more events while publishes are still in flight on the server side.\n\n PublishResponse holds a PublishResult for each event published that indicates success\n or failure of the publish. A client can then retry the publish as needed before sending\n more PublishRequests for new events to publish.\n\n A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.\n Otherwise, the server closes the stream and notifies the client. 
Once the client is notified of the stream closure,\n it must make a new PublishStream call to resume publishing.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PubSubServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Subscribe': grpc.stream_stream_rpc_method_handler(", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. 
Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single 
interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. 
A client needs to call the Subscribe RPC again to restart the subscription\n at a new point in the stream.\n\n The first FetchRequest of the stream identifies the topic to subscribe to.\n If any subsequent FetchRequest provides topic_name, it must match what\n was provided in the first FetchRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSchema(self, request, context):\n \"\"\"Get the event schema for a topic based on a schema ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTopic(self, request, context):\n \"\"\"\n Get the topic Information related to the specified topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Publish(self, request, context):\n \"\"\"\n Send a publish request to synchronously publish events to a topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishStream(self, request_iterator, context):\n \"\"\"\n Bidirectional Streaming RPC to publish events to the event bus.\n PublishRequest contains the batch of events to publish.\n\n The first PublishRequest of the stream identifies the topic to publish on.\n If any subsequent PublishRequest provides topic_name, it must match what\n was provided in the first PublishRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n\n The server returns a PublishResponse for each PublishRequest when publish is\n complete for the batch. A client does not have to wait for a PublishResponse\n before sending a new PublishRequest, i.e. multiple publish batches can be queued\n up, which allows for higher publish rate as a client can asynchronously\n publish more events while publishes are still in flight on the server side.\n\n PublishResponse holds a PublishResult for each event published that indicates success\n or failure of the publish. A client can then retry the publish as needed before sending\n more PublishRequests for new events to publish.\n\n A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.\n Otherwise, the server closes the stream and notifies the client. Once the client is notified of the stream closure,\n it must make a new PublishStream call to resume publishing.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PubSubServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Subscribe': grpc.stream_stream_rpc_method_handler(\n servicer.Subscribe,\n request_deserializer=pubsub__api__pb2.FetchRequest.FromString,\n response_serializer=pubsub__api__pb2.FetchResponse.SerializeToString,\n ),\n 'GetSchema': grpc.unary_unary_rpc_method_handler(", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. 
Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. 
This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. A client needs to call the Subscribe RPC again to restart the subscription\n at a new point in the stream.\n\n The first FetchRequest of the stream identifies the topic to subscribe to.\n If any subsequent FetchRequest provides topic_name, it must match what\n was provided in the first FetchRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSchema(self, request, context):\n \"\"\"Get the event schema for a topic based on a schema ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTopic(self, request, context):\n \"\"\"\n Get the topic Information related to the specified topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Publish(self, request, context):\n \"\"\"\n Send a publish request to synchronously publish events to a topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishStream(self, request_iterator, context):\n \"\"\"\n Bidirectional Streaming RPC to publish events to the event bus.\n PublishRequest contains the batch of events to publish.\n\n The first PublishRequest of the stream identifies the topic to publish on.\n If any subsequent PublishRequest provides topic_name, it must match what\n was provided in the first PublishRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n\n The server returns a PublishResponse for each PublishRequest when publish is\n complete for the batch. A client does not have to wait for a PublishResponse\n before sending a new PublishRequest, i.e. multiple publish batches can be queued\n up, which allows for higher publish rate as a client can asynchronously\n publish more events while publishes are still in flight on the server side.\n\n PublishResponse holds a PublishResult for each event published that indicates success\n or failure of the publish. A client can then retry the publish as needed before sending\n more PublishRequests for new events to publish.\n\n A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.\n Otherwise, the server closes the stream and notifies the client. 
Once the client is notified of the stream closure,\n it must make a new PublishStream call to resume publishing.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PubSubServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Subscribe': grpc.stream_stream_rpc_method_handler(\n servicer.Subscribe,\n request_deserializer=pubsub__api__pb2.FetchRequest.FromString,\n response_serializer=pubsub__api__pb2.FetchResponse.SerializeToString,\n ),\n 'GetSchema': grpc.unary_unary_rpc_method_handler(\n servicer.GetSchema,\n request_deserializer=pubsub__api__pb2.SchemaRequest.FromString,\n response_serializer=pubsub__api__pb2.SchemaInfo.SerializeToString,\n ),\n 'GetTopic': grpc.unary_unary_rpc_method_handler(", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. 
Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. 
This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. A client needs to call the Subscribe RPC again to restart the subscription\n at a new point in the stream.\n\n The first FetchRequest of the stream identifies the topic to subscribe to.\n If any subsequent FetchRequest provides topic_name, it must match what\n was provided in the first FetchRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSchema(self, request, context):\n \"\"\"Get the event schema for a topic based on a schema ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTopic(self, request, context):\n \"\"\"\n Get the topic Information related to the specified topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Publish(self, request, context):\n \"\"\"\n Send a publish request to synchronously publish events to a topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishStream(self, request_iterator, context):\n \"\"\"\n Bidirectional Streaming RPC to publish events to the event bus.\n PublishRequest contains the batch of events to publish.\n\n The first PublishRequest of the stream identifies the topic to publish on.\n If any subsequent PublishRequest provides topic_name, it must match what\n was provided in the first PublishRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n\n The server returns a PublishResponse for each PublishRequest when publish is\n complete for the batch. A client does not have to wait for a PublishResponse\n before sending a new PublishRequest, i.e. multiple publish batches can be queued\n up, which allows for higher publish rate as a client can asynchronously\n publish more events while publishes are still in flight on the server side.\n\n PublishResponse holds a PublishResult for each event published that indicates success\n or failure of the publish. A client can then retry the publish as needed before sending\n more PublishRequests for new events to publish.\n\n A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.\n Otherwise, the server closes the stream and notifies the client. 
Once the client is notified of the stream closure,\n it must make a new PublishStream call to resume publishing.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PubSubServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Subscribe': grpc.stream_stream_rpc_method_handler(\n servicer.Subscribe,\n request_deserializer=pubsub__api__pb2.FetchRequest.FromString,\n response_serializer=pubsub__api__pb2.FetchResponse.SerializeToString,\n ),\n 'GetSchema': grpc.unary_unary_rpc_method_handler(\n servicer.GetSchema,\n request_deserializer=pubsub__api__pb2.SchemaRequest.FromString,\n response_serializer=pubsub__api__pb2.SchemaInfo.SerializeToString,\n ),\n 'GetTopic': grpc.unary_unary_rpc_method_handler(\n servicer.GetTopic,\n request_deserializer=pubsub__api__pb2.TopicRequest.FromString,\n response_serializer=pubsub__api__pb2.TopicInfo.SerializeToString,\n ),\n 'Publish': grpc.unary_unary_rpc_method_handler(", "type": "infile" }, { "content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom pub_sub.stubs import pubsub_api_pb2 as pubsub__api__pb2\n\n\nclass PubSubStub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, 
including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. 
        A client needs to call the Subscribe RPC again to restart the subscription
        at a new point in the stream.

        The first FetchRequest of the stream identifies the topic to subscribe to.
        If any subsequent FetchRequest provides topic_name, it must match what
        was provided in the first FetchRequest; otherwise, the RPC returns an error
        with INVALID_ARGUMENT status.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetSchema(self, request, context):
        """Get the event schema for a topic based on a schema ID.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTopic(self, request, context):
        """
        Get the topic information related to the specified topic.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Publish(self, request, context):
        """
        Send a publish request to synchronously publish events to a topic.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def PublishStream(self, request_iterator, context):
        """
        Bidirectional Streaming RPC to publish events to the event bus.
        PublishRequest contains the batch of events to publish.

        The first PublishRequest of the stream identifies the topic to publish on.
        If any subsequent PublishRequest provides topic_name, it must match what
        was provided in the first PublishRequest; otherwise, the RPC returns an error
        with INVALID_ARGUMENT status.

        The server returns a PublishResponse for each PublishRequest when publish is
        complete for the batch. A client does not have to wait for a PublishResponse
        before sending a new PublishRequest, i.e. multiple publish batches can be queued
        up, which allows for higher publish rate as a client can asynchronously
        publish more events while publishes are still in flight on the server side.

        PublishResponse holds a PublishResult for each event published that indicates success
        or failure of the publish. A client can then retry the publish as needed before sending
        more PublishRequests for new events to publish.

        A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.
        Otherwise, the server closes the stream and notifies the client. Once the client is notified of the stream closure,
        it must make a new PublishStream call to resume publishing.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_PubSubServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'Subscribe': grpc.stream_stream_rpc_method_handler(
            servicer.Subscribe,
            request_deserializer=pubsub__api__pb2.FetchRequest.FromString,
            response_serializer=pubsub__api__pb2.FetchResponse.SerializeToString,
        ),
        'GetSchema': grpc.unary_unary_rpc_method_handler(
            servicer.GetSchema,
            request_deserializer=pubsub__api__pb2.SchemaRequest.FromString,
            response_serializer=pubsub__api__pb2.SchemaInfo.SerializeToString,
        ),
        'GetTopic': grpc.unary_unary_rpc_method_handler(
            servicer.GetTopic,
            request_deserializer=pubsub__api__pb2.TopicRequest.FromString,
            response_serializer=pubsub__api__pb2.TopicInfo.SerializeToString,
        ),
        'Publish': grpc.unary_unary_rpc_method_handler(
            servicer.Publish,
            request_deserializer=pubsub__api__pb2.PublishRequest.FromString,
            response_serializer=pubsub__api__pb2.PublishResponse.SerializeToString,
        ),
        'PublishStream': grpc.stream_stream_rpc_method_handler(
            servicer.PublishStream,
            request_deserializer=pubsub__api__pb2.PublishRequest.FromString,
            response_serializer=pubsub__api__pb2.PublishResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'eventbus.v1.PubSub', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


 # This class is part of an EXPERIMENTAL API.
class PubSub(object):
    """
    The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time
    event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.

    A session token is needed to authenticate. Any of the Salesforce supported
    OAuth flows can be used to obtain a session token:
    https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5

    For each RPC, a client needs to pass authentication information
    as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.

    For Salesforce session token authentication, use:
    accesstoken : access token
    instanceurl : Salesforce instance URL
    tenantid : tenant/org id of the client

    StatusException is thrown in case of response failure for any request.
    """

    @staticmethod
    def Subscribe(request_iterator,
                  target,
                  options=(),
                  channel_credentials=None,
                  call_credentials=None,
                  insecure=False,
                  compression=None,
                  wait_for_ready=None,
                  timeout=None,
                  metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/eventbus.v1.PubSub/Subscribe',
            pubsub__api__pb2.FetchRequest.SerializeToString,
            pubsub__api__pb2.FetchResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetSchema(request,
                  target,
                  options=(),
                  channel_credentials=None,
                  call_credentials=None,
                  insecure=False,
                  compression=None,
                  wait_for_ready=None,
                  timeout=None,
                  metadata=None):
        return grpc.experimental.unary_unary(request, target, '/eventbus.v1.PubSub/GetSchema',
            pubsub__api__pb2.SchemaRequest.SerializeToString,
            pubsub__api__pb2.SchemaInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTopic(request,
                 target,
                 options=(),
                 channel_credentials=None,
                 call_credentials=None,
                 insecure=False,
                 compression=None,
                 wait_for_ready=None,
                 timeout=None,
                 metadata=None):
        return grpc.experimental.unary_unary(request, target, '/eventbus.v1.PubSub/GetTopic',
            pubsub__api__pb2.TopicRequest.SerializeToString,
            pubsub__api__pb2.TopicInfo.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Publish(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                insecure=False,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
        return grpc.experimental.unary_unary(request, target, '/eventbus.v1.PubSub/Publish',
            pubsub__api__pb2.PublishRequest.SerializeToString,
            pubsub__api__pb2.PublishResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PublishStream(request_iterator,
                      target,
                      options=(),
                      channel_credentials=None,
                      call_credentials=None,
                      insecure=False,
                      compression=None,
                      wait_for_ready=None,
                      timeout=None,
                      metadata=None):
        # Serializer arguments follow the same pattern as the Subscribe helper above,
        # using the PublishRequest/PublishResponse messages.
        return grpc.experimental.stream_stream(request_iterator, target, '/eventbus.v1.PubSub/PublishStream',
            pubsub__api__pb2.PublishRequest.SerializeToString,
            pubsub__api__pb2.PublishResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
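# ---------------------------------------------------------------------------
# Illustrative client sketch (not part of the generated module above): one way
# to drive the pull-based Subscribe flow described in the docstrings. The
# endpoint, topic name, credential values, and the FetchRequest/ReplayPreset
# field names are assumptions taken from the published pubsub_api.proto;
# verify them against pubsub_api_pb2 before relying on this.
# ---------------------------------------------------------------------------
import grpc

from pub_sub.stubs import pubsub_api_pb2 as pb2
from pub_sub.stubs import pubsub_api_pb2_grpc as pb2_grpc


def fetch_requests(topic_name, batch_size):
    # The first FetchRequest names the topic and the replay point. A real
    # client would keep yielding further FetchRequests (without a new replay
    # option) as it consumes events, to keep the stream flowing.
    yield pb2.FetchRequest(
        topic_name=topic_name,
        replay_preset=pb2.ReplayPreset.LATEST,  # assumed enum value
        num_requested=batch_size,               # assumed field name
    )


def subscribe_example(session_token, instance_url, org_id):
    # Auth values are passed as the metadata headers listed in the class
    # docstring: accesstoken / instanceurl / tenantid.
    auth_metadata = (
        ('accesstoken', session_token),
        ('instanceurl', instance_url),
        ('tenantid', org_id),
    )
    with grpc.secure_channel('api.pubsub.salesforce.com:7443',  # assumed endpoint
                             grpc.ssl_channel_credentials()) as channel:
        stub = pb2_grpc.PubSubStub(channel)
        stream = stub.Subscribe(fetch_requests('/event/Some_Event__e', 10),
                                metadata=auth_metadata)
        for fetch_response in stream:
            for consumer_event in fetch_response.events:
                # Event payloads are Avro-encoded; decode them with the schema
                # returned by GetSchema for the event's schema_id.
                print(consumer_event.event.schema_id)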
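# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated module above): a PublishStream
# request generator shaped around the keepalive rule in the PublishStream
# docstring -- a valid PublishRequest with at least one event must arrive at
# least every 70 seconds, otherwise the server closes the stream and a new
# PublishStream call is needed. PublishRequest.events and
# ProducerEvent.schema_id/payload are field-name assumptions taken from the
# published pubsub_api.proto.
# ---------------------------------------------------------------------------
import queue

from pub_sub.stubs import pubsub_api_pb2 as pb2


def publish_requests(topic_name, schema_id, payload_queue):
    # payload_queue holds pre-encoded Avro payloads (bytes). Waiting at most
    # 60 seconds keeps each PublishRequest inside the 70-second window; when
    # nothing arrives in time, the generator ends and the stream is allowed to
    # close -- publishing resumes later with a new PublishStream call.
    while True:
        try:
            payload = payload_queue.get(timeout=60)
        except queue.Empty:
            return
        event = pb2.ProducerEvent(schema_id=schema_id, payload=payload)
        yield pb2.PublishRequest(topic_name=topic_name, events=[event])

# Usage (with stub and auth_metadata built as in the Subscribe sketch):
#     for response in stub.PublishStream(
#             publish_requests('/event/Some_Event__e', schema_id, q),
#             metadata=auth_metadata):
#         ...  # inspect response.results for per-event success or failure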
Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Subscribe = channel.stream_stream(\n '/eventbus.v1.PubSub/Subscribe',\n request_serializer=pubsub__api__pb2.FetchRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.FetchResponse.FromString,\n )\n self.GetSchema = channel.unary_unary(\n '/eventbus.v1.PubSub/GetSchema',\n request_serializer=pubsub__api__pb2.SchemaRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.SchemaInfo.FromString,\n )\n self.GetTopic = channel.unary_unary(\n '/eventbus.v1.PubSub/GetTopic',\n request_serializer=pubsub__api__pb2.TopicRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.TopicInfo.FromString,\n )\n self.Publish = channel.unary_unary(\n '/eventbus.v1.PubSub/Publish',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n self.PublishStream = channel.stream_stream(\n '/eventbus.v1.PubSub/PublishStream',\n request_serializer=pubsub__api__pb2.PublishRequest.SerializeToString,\n response_deserializer=pubsub__api__pb2.PublishResponse.FromString,\n )\n\n\nclass PubSubServicer(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n def Subscribe(self, request_iterator, context):\n \"\"\"\n Bidirectional streaming RPC to subscribe to a Topic. The subscription is pull-based. A client can request\n for more events as it consumes events. This enables a client to handle flow control based on the client's processing speed.\n\n Typical flow:\n 1. Client requests for X number of events via FetchRequest.\n 2. Server receives request and delivers events until X events are delivered to the client via one or more FetchResponse messages.\n 3. Client consumes the FetchResponse messages as they come.\n 4. Client issues new FetchRequest for Y more number of events. 
This request can\n come before the server has delivered the earlier requested X number of events\n so the client gets a continuous stream of events if any.\n\n If a client requests more events before the server finishes the last\n requested amount, the server appends the new amount to the current amount of\n events it still needs to fetch and deliver.\n\n A client can subscribe at any point in the stream by providing a replay option in the first FetchRequest.\n The replay option is honored for the first FetchRequest received from a client. Any subsequent FetchRequests with a\n new replay option are ignored. A client needs to call the Subscribe RPC again to restart the subscription\n at a new point in the stream.\n\n The first FetchRequest of the stream identifies the topic to subscribe to.\n If any subsequent FetchRequest provides topic_name, it must match what\n was provided in the first FetchRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSchema(self, request, context):\n \"\"\"Get the event schema for a topic based on a schema ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTopic(self, request, context):\n \"\"\"\n Get the topic Information related to the specified topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Publish(self, request, context):\n \"\"\"\n Send a publish request to synchronously publish events to a topic.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def PublishStream(self, request_iterator, context):\n \"\"\"\n Bidirectional Streaming RPC to publish events to the event bus.\n PublishRequest contains the batch of events to publish.\n\n The first PublishRequest of the stream identifies the topic to publish on.\n If any subsequent PublishRequest provides topic_name, it must match what\n was provided in the first PublishRequest; otherwise, the RPC returns an error\n with INVALID_ARGUMENT status.\n\n The server returns a PublishResponse for each PublishRequest when publish is\n complete for the batch. A client does not have to wait for a PublishResponse\n before sending a new PublishRequest, i.e. multiple publish batches can be queued\n up, which allows for higher publish rate as a client can asynchronously\n publish more events while publishes are still in flight on the server side.\n\n PublishResponse holds a PublishResult for each event published that indicates success\n or failure of the publish. A client can then retry the publish as needed before sending\n more PublishRequests for new events to publish.\n\n A client must send a valid publish request with one or more events every 70 seconds to hold on to the stream.\n Otherwise, the server closes the stream and notifies the client. 
Once the client is notified of the stream closure,\n it must make a new PublishStream call to resume publishing.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PubSubServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Subscribe': grpc.stream_stream_rpc_method_handler(\n servicer.Subscribe,\n request_deserializer=pubsub__api__pb2.FetchRequest.FromString,\n response_serializer=pubsub__api__pb2.FetchResponse.SerializeToString,\n ),\n 'GetSchema': grpc.unary_unary_rpc_method_handler(\n servicer.GetSchema,\n request_deserializer=pubsub__api__pb2.SchemaRequest.FromString,\n response_serializer=pubsub__api__pb2.SchemaInfo.SerializeToString,\n ),\n 'GetTopic': grpc.unary_unary_rpc_method_handler(\n servicer.GetTopic,\n request_deserializer=pubsub__api__pb2.TopicRequest.FromString,\n response_serializer=pubsub__api__pb2.TopicInfo.SerializeToString,\n ),\n 'Publish': grpc.unary_unary_rpc_method_handler(\n servicer.Publish,\n request_deserializer=pubsub__api__pb2.PublishRequest.FromString,\n response_serializer=pubsub__api__pb2.PublishResponse.SerializeToString,\n ),\n 'PublishStream': grpc.stream_stream_rpc_method_handler(\n servicer.PublishStream,\n request_deserializer=pubsub__api__pb2.PublishRequest.FromString,\n response_serializer=pubsub__api__pb2.PublishResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'eventbus.v1.PubSub', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass PubSub(object):\n \"\"\"\n The Pub/Sub API provides a single interface for publishing and subscribing to platform events, including real-time\n event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.\n\n A session token is needed to authenticate. Any of the Salesforce supported\n OAuth flows can be used to obtain a session token:\n https://help.salesforce.com/articleView?id=sf.remoteaccess_oauth_flows.htm&type=5\n\n For each RPC, a client needs to pass authentication information\n as metadata headers (https://www.grpc.io/docs/guides/concepts/#metadata) with their method call.\n\n For Salesforce session token authentication, use:\n accesstoken : access token\n instanceurl : Salesforce instance URL\n tenantid : tenant/org id of the client\n\n StatusException is thrown in case of response failure for any request.\n \"\"\"\n\n @staticmethod\n def Subscribe(request_iterator,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,", "type": "random" } ]
[ " self.Subscribe = channel.stream_stream(", " servicer.Subscribe,", " self.GetSchema = channel.unary_unary(", " self.GetTopic = channel.unary_unary(", " servicer.GetSchema,", " self.Publish = channel.unary_unary(", " servicer.GetTopic,", " self.PublishStream = channel.stream_stream(", " servicer.Publish,", " servicer.PublishStream,", "", " PublishResponse holds a PublishResult for each event published that indicates success", " For each RPC, a client needs to pass authentication information", " publish more events while publishes are still in flight on the server side.", " event monitoring events, and change data capture events. The Pub/Sub API is a gRPC API that is based on HTTP/2.", " options=(),", " pubsub__api__pb2.PublishRequest.SerializeToString,", " metadata=None):", " insecure=False," ]
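The Subscribe docstring above describes a pull-based flow: the first FetchRequest names the topic, carries the replay option, and asks for a number of events, while auth is passed as accesstoken/instanceurl/tenantid metadata headers. Below is a minimal, hypothetical client sketch of that flow; the module paths, the endpoint, the ReplayPreset/num_requested/events field names, and all credential values are illustrative assumptions, not taken from the generated stubs.

import grpc

from pub_sub.stubs import pubsub_api_pb2 as pb2
from pub_sub.stubs import pubsub_api_pb2_grpc as pb2_grpc  # assumed module name


def fetch_requests(topic_name, num_requested=10):
    # The first FetchRequest identifies the topic and the replay option;
    # later requests on the same stream would only ask for more events.
    yield pb2.FetchRequest(
        topic_name=topic_name,
        replay_preset=pb2.ReplayPreset.LATEST,  # assumed enum value
        num_requested=num_requested,            # assumed field name
    )


def main():
    # Session-token auth is passed as metadata headers, per the docstring above.
    auth_metadata = (
        ('accesstoken', '<session token>'),
        ('instanceurl', 'https://<my-domain>.my.salesforce.com'),
        ('tenantid', '<org id>'),
    )
    # Endpoint is an assumption for illustration only.
    with grpc.secure_channel('api.pubsub.salesforce.com:7443',
                             grpc.ssl_channel_credentials()) as channel:
        stub = pb2_grpc.PubSubStub(channel)
        for fetch_response in stub.Subscribe(fetch_requests('/event/Some_Event__e'),
                                             metadata=auth_metadata):
            # 'events' field name assumed
            print('received %d events' % len(fetch_response.events))


if __name__ == '__main__':
    main()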
METASEP
41
autopkg__datajar-recipes
autopkg__datajar-recipes METASEP ngrok/ngrokVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for ngrokVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['ngrokVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class ngrokVersioner(Processor): ''' Returns the version from the ngrok binary Raising if the key is not found. 
''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the ngrok binary.'), }, } output_variables = { 'version': { 'description': ('Version of the ngrok binary.'), }, } def main(self): ''' See docstring for ngrokVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ngrok.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "ngrok binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access ngrok binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = ngrokVersioner() VMware Fusion 8/VMwareFusion8URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion8URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '8' # lock version in class VMwareFusion8URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s." 
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion8URLProvider() processor.execute_shell() VMware Fusion 12/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." 
), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. if os.path.exists(dest_item) and overwrite: try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = Copier() PROCESSOR.execute_shell() VMware Fusion 11/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." ), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. 
if os.path.exists(dest_item): try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = DittoCopier() PROCESSOR.execute_shell() VMware Fusion 10/VMwareFusion10URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion10URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '10' # lock version in class VMwareFusion10URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s'."
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion10URLProvider() processor.execute_shell() Traffic/TrafficXMLParser.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=import-error, too-few-public-methods """See docstring for TrafficXMLParser class""" from __future__ import absolute_import import os from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["TrafficXMLParser"] __version__ = '1.1' class TrafficXMLParser(Processor): """Parses /META-INF/AIR/application.xml from the copied .air installer""" description = __doc__ input_variables = { "app_xml": { "required": True, "description": "Path to the application.xml." }, } output_variables = { "bundleid": { "description": "Bundled ID.", }, "version": { "description": "The value of CFBundleShortVersionString for the app bundle." 
}, } def main(self): """Parses /META-INF/AIR/application.xml from the copied .air installer""" if not os.path.exists(self.env["app_xml"]): raise ProcessorError("application.xml not found at %s" % self.env["app_xml"]) else: tree = ElementTree.parse(self.env["app_xml"]) for b_id in tree.iterfind('{http://ns.adobe.com/air/application/24.0}id'): self.env["bundleid"] = b_id.text for ver_num in tree.iterfind('{http://ns.adobe.com/air/application/24.0}versionNumber'): self.env["version"] = ver_num.text self.output("bundleid: %s" % self.env["bundleid"]) self.output("version: %s" % self.env["version"]) if __name__ == "__main__": PROCESSOR = TrafficXMLParser() Shared Processors/TempFileFinder.py METASEP #!/usr/local/autopkg/python # # Copyright 2013 Jesse Peterson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Temp as waiting PR merging and release including PR - https://github.com/autopkg/autopkg/pull/742 # """See docstring for TempFileFinder class""" import os.path from glob import glob from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["TempFileFinder"] class TempFileFinder(DmgMounter): """Finds a filename for use in other Processors. Currently only supports glob filename patterns. Requires version 0.2.3. """ input_variables = { "pattern": { "description": "Shell glob pattern to match files by", "required": True, }, "find_method": { "description": ( "Type of pattern to match. Currently only " 'supported type is "glob" (also the default)' ), "default": "glob", "required": False, }, } output_variables = { "found_filename": {"description": "Full path of found filename"}, "dmg_found_filename": {"description": "DMG-relative path of found filename"}, "found_basename": {"description": "Basename of found filename"}, } description = __doc__ def globfind(self, pattern): """If multiple files are found the last alphanumerically sorted found file is returned""" glob_matches = glob(pattern, recursive=True) if len(glob_matches) < 1: raise ProcessorError("No matching filename found") glob_matches.sort() return glob_matches[-1] def main(self): pattern = self.env.get("pattern") method = self.env.get("find_method") if method != "glob": raise ProcessorError(f"Unsupported find_method: {method}") source_path = pattern # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) try: if dmg: # Mount dmg and copy path inside. 
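                # Re-rooting the pattern beneath the temporary mount point means the
                # glob below searches inside the mounted image; the image is unmounted
                # again in the finally block once a match has been recorded.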
mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with globbing match = self.globfind(source_path) self.env["found_filename"] = match self.output( f"Found file match: '{self.env['found_filename']}' from globbed '{source_path}'" ) if dmg and match.startswith(mount_point): self.env["dmg_found_filename"] = match[len(mount_point) :].lstrip("/") self.output( f"DMG-relative file match: '{self.env['dmg_found_filename']}'" ) if match.endswith('/'): self.env["found_basename"] = os.path.basename(match.rstrip("/")) else: self.env["found_basename"] = os.path.basename(match) self.output( f"Basename match: '{self.env['found_basename']}'" ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = TempFileFinder() PROCESSOR.execute_shell() Shared Processors/JSONFileReader.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for JSONFileReader class ''' # Standard Imports from __future__ import absolute_import import json import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['JSONFileReader'] __version__ = '1.0' # pylint: disable = too-few-public-methods class JSONFileReader(Processor): ''' Parses a JSON file, returning the value of the supplied key. Raising if the key is not found. 
''' description = __doc__ input_variables = { 'json_key': { 'required': True, 'description': ('Key to look for, and return the value of'), }, 'json_path': { 'required': True, 'description': ('Path to the JSON file'), }, } output_variables = { 'json_value': { 'description': ('Value of the JSON key'), }, } def main(self): ''' See docstring for JSONFileReader class ''' # Progress notification self.output("Looking for: {}".format(self.env['json_path'])) if os.path.isfile(self.env['json_path']): # Read in JSON file with open(self.env['json_path']) as json_file: # Try to parse json_path as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['json_path'], err_msg)) # Look for value of key json_key, raise if an issue try: self.env['json_value'] = load_json[self.env['json_key']] except KeyError: raise ProcessorError("Cannot find key {} within json file: {}" .format(self.env['json_key'], self.env['json_path'])) else: raise ProcessorError("Cannot access JSON file at path: {}" .format(self.env['json_path'])) self.output("json_value: {}".format(self.env['json_value'])) if __name__ == '__main__': PROCESSOR = JSONFileReader() Shared Processors/DistributionPkgInfo.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
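#
# Overview: the processor below lists the flat package's table of contents with
# `xar -tf`, extracts the embedded Distribution file into the recipe cache's
# downloads directory, and reads the product version and pkg-ref id out of that
# XML with ElementTree.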
# pylint: disable=import-error, too-few-public-methods """See docstring for DistributionPkgInfo class""" from __future__ import absolute_import from __future__ import print_function import os import subprocess from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["DistributionPkgInfo"] __version__ = '1.1.1' class DistributionPkgInfo(Processor): """Parses a distribution pkg to pull the info, other formats to be added later""" description = __doc__ input_variables = { "pkg_path": { "required": True, "description": ("Path to the Pkg.."), }, } output_variables = { "pkg_id": { "description": ("The package ID.."), }, "version": { "description": ("The version of the pkg from it's info"), }, } # pylint: disable=too-many-branches def main(self): """Cobbled together from various sources, should extract information from a Distribution pkg""" # Build dir as needed,pinched with <3 from: # https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/FlatPkgUnpacker.py#L72 # Extract pkg info, pinched with <3 from: # https://github.com/munki/munki/blob/master/code/client/munkilib/pkgutils.py#L374 self.env["abspkgpath"] = os.path.join(self.env["pkg_path"]) file_path = os.path.join(self.env["RECIPE_CACHE_DIR"], "downloads") cmd_toc = ['/usr/bin/xar', '-tf', self.env["abspkgpath"]] proc = subprocess.Popen(cmd_toc, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (toc, err) = proc.communicate() toc = toc.decode("utf-8") .strip().split('\n') if proc.returncode == 0: # Walk trough the TOC entries if not os.path.exists(file_path): os.mkdir(file_path) for toc_entry in [item for item in toc if item.startswith('Distribution')]: cmd_extract = ['/usr/bin/xar', '-xf', self.env["abspkgpath"], \ toc_entry, '-C', file_path] _ = subprocess.call(cmd_extract) else: raise ProcessorError("pkg not found at pkg_path") dist_path = os.path.join(file_path, "Distribution") version = None pkg_id = None if not os.path.exists(dist_path): raise ProcessorError("Cannot find Distribution") else: tree = ElementTree.parse(dist_path) _ = tree.getroot() try: for elem in tree.iter(tag='product'): version = elem.get("version") for elem in tree.iter(tag='pkg-ref'): pkg_id = elem.get("id") except ElementTree.ParseError as err: print(("Can't parse distribution file %s: %s" % ('dist_path', err.strerror))) if not pkg_id: raise ProcessorError("cannot get pkg_id") else: self.env["pkg_id"] = pkg_id if not version: raise ProcessorError("cannot get version") else: self.env["version"] = version os.remove(dist_path) if __name__ == '__main__': PROCESSOR = DistributionPkgInfo() MacTeX Ghostscript/GhostscriptVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for GhostscriptVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['GhostscriptVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class GhostscriptVersioner(Processor): ''' Returns the version from the Ghostscript binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the Ghostscript binary.'), }, } output_variables = { 'version': { 'description': ('Version of the Ghostscript binary.'), }, } def main(self): ''' See docstring for GhostscriptVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ghostscript.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "Ghostscript binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access Ghostscript binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = GhostscriptVersioner() BrowserStackLocal/BrowserStackLocalBinaryVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for BrowserStackLocalBinaryVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['BrowserStackLocalBinaryVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class BrowserStackLocalBinaryVersioner(Processor): ''' Returns the version from the BrowserStackLocal binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the BrowserStackLocal binary.'), }, } output_variables = { 'version': { 'description': ('Version of the BrowserStackLocal binary.'), }, } def main(self): ''' See docstring for BrowserStackLocalBinaryVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.browserstack.com/local-testing/binary-params # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '--version'] ).split()[3].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "BrowserStackLocal binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access BrowserStackLocal binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = BrowserStackLocalBinaryVersioner() Adobe CC 2019/AdobeCC2019Versioner.py METASEP #!/usr/bin/python # Copyright 2021 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
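#
# Overview: the processor below locates <NAME>_Install.pkg under
# ~/Downloads/<NAME>/Build, reads Contents/Resources/optionXML.xml for the SAP
# code and target folder, then pulls the bundle id and version either from the
# HyperDrive Application.json/.pimx payload or, for Acrobat (APRO), from the
# RIBS proxy.xml.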
# pylint: disable=import-error """See docstring for AdobeCC2019Versioner class""" from __future__ import absolute_import import glob import json import os import re import zipfile from xml.etree import ElementTree try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError __all__ = ['AdobeCC2019Versioner'] __version__ = ['1.2.1'] class AdobeCC2019Versioner(Processor): """Parses generated Adobe Admin Console CC 2019 pkgs for detailed application path and bundle version info""" description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): """Find the Adobe*_Install.pkg in the Downloads dir based on the name""" download_path = os.path.expanduser('~/Downloads') self.env['PKG'] = os.path.join(download_path, self.env['NAME'], \ 'Build', self.env['NAME'] + '_Install.pkg') self.output('pkg %s' % self.env['PKG']) self.env['uninstaller_pkg_path'] = glob.glob(os.path.join\ (os.path.dirname(self.env['PKG']), '*_Uninstall.pkg'))[0] self.process_installer() def process_installer(self): ''' Determine a pkginfo, version and jss inventory name from the created package. Inputs: PKG: Path to the pkg Outputs: app_json/proxy_xml: The path of the files that within the pkg's ''' install_lang = None option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output('Processing %s' % option_xml_path) option_xml = ElementTree.parse(option_xml_path) for hd_media in option_xml.findall('.//HDMedias/HDMedia'): if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = hd_media.findtext('TargetFolderName') if install_lang is None: for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = ribs_media.findtext('TargetFolderName') self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] is 'APRO': self.output('Installer is HyperDrive') self.output('app_json: %s' % self.env['app_json']) self.process_hd_installer() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = os.path.join(self.env['PKG'], 'Contents/Resources/Setup', \ self.env['target_folder'], 'proxy.xml') if not os.path.exists(self.env['proxy_xml']): raise ProcessorError('APRO selected, proxy.xml not found at: %s' \ % self.env['proxy_xml']) else: self.process_apro_installer() def process_apro_installer(self): ''' Process APRO installer - proxy_xml: Path to proxy_xml if pkg is APRO ''' self.output('Processing Acrobat installer') self.output('proxy_xml: %s' % 
self.env['proxy_xml']) tree = ElementTree.parse(self.env['proxy_xml']) root = tree.getroot() app_bundle_text = root.findtext\ ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']') app_bundle = app_bundle_text.split('/')[1] self.output('app_bundle: %s' % app_bundle) app_path_text = root.findtext('./InstallDir/Platform') self.output('app_path_text: %s' % app_path_text) app_path = app_path_text.split('/')[1] self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) app_version = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output('app_version: %s' % app_version) self.env['display_name'] = app_path + ' CC 2019' self.output('display_name: %s' % self.env['display_name']) self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % self.env['vers_compare_key']) app_bundle_id = 'com.adobe.Acrobat.Pro' self.output('app_bundle_id: %s' % app_bundle_id) self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) # pylint: disable=too-many-branches def process_hd_installer(self): ''' Process HD installer - app_json: Path to the Application JSON from within the PKG ''' #pylint: disable=too-many-locals, too-many-statements self.output('Processing HD installer') with open(self.env['app_json']) as json_file: load_json = json.load(json_file) # AppLaunch is not always in the same format, but is splittable if 'AppLaunch' in load_json: # Bridge CC is HD but does not have AppLaunch app_launch = load_json['AppLaunch'] self.output('app_launch: %s' % app_launch) app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] self.output('app_bundle: %s' % app_bundle) self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) if not app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' CC 2019' elif app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' 2019' else: self.env['display_name'] = app_path self.output('display_name: %s' % self.env['display_name']) zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output('zip_file: %s' % zip_file) zip_path = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], zip_file + '.zip') self.output('zip_path: %s' % zip_path) with zipfile.ZipFile(zip_path, mode='r') as myzip: with myzip.open(zip_file + '.pimx') as mytxt: txt = mytxt.read() tree = ElementTree.fromstring(txt) # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. 
# Break when found .app/Contents/Info.plist for elem in tree.findall('Assets'): for i in elem.getchildren(): if i.attrib['target'].upper().startswith('[INSTALLDIR]'): bundle_location = i.attrib['source'] self.output('bundle_location: %s' % bundle_location) else: continue if not bundle_location.startswith('[StagingFolder]'): continue elif bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue else: bundle_location = bundle_location[16:] if bundle_location.endswith('.app'): zip_bundle = os.path.join('1', bundle_location, \ 'Contents/Info.plist') else: zip_bundle = os.path.join('1', bundle_location, \ app_bundle, 'Contents/Info.plist') try: with myzip.open(zip_bundle) as myplist: plist = myplist.read() data = load_plist(plist) if self.env['sap_code'] == 'LTRM': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = \ 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % \ self.env['vers_compare_key']) app_version = data[self.env['vers_compare_key']] app_bundle_id = data['CFBundleIdentifier'] self.output('app_bundle_id: %s' % app_bundle_id) self.output('staging_folder: %s' % bundle_location) self.output('staging_folder_path: %s' % zip_bundle) self.output('app_version: %s' % app_version) self.output('app_bundle: %s' % app_bundle) break except zipfile.BadZipfile: continue # Now we have the deets, let's use them self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) def create_pkginfo(self, app_bundle, app_bundle_id, app_version, installed_path): """Create pkginfo with found details Args: app_bundle (str): Bundle name app_version (str): Bundle version installed_path (str): The path where the installed item will be installed. """ self.env['jss_inventory_name'] = app_bundle self.env['pkg_path'] = self.env['PKG'] self.env['version'] = app_version pkginfo = { 'display_name': self.env['display_name'], 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': installed_path, 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': app_bundle_id, }] self.env['additional_pkginfo'] = pkginfo self.output('additional_pkginfo: %s' % self.env['additional_pkginfo']) if __name__ == '__main__': PROCESSOR = AdobeCC2019Versioner() Adobe CC 2019/AdobeCC2019Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed "as is" by DATA JAR LTD. DESCRIPTION Imports Adobe CC 2019 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('CC2019'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*CC2019 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe CC 2019 folder found, creating recipe list...') else: print('%s Adobe CC 2019 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): '''Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' 
+ adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') PARSER_ARGS = PARSER.parse_args() RECIPE_TYPE = PARSER_ARGS.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobecc2019_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobecc2019_report.plist') main() Adobe 2021/Adobe2021Versioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2021Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2021Versioner'] __version__ = ['1.4.10'] # Class def class Adobe2021Versioner(Processor): ''' Parses generated Adobe Admin Console 2021 pkgs for detailed application path and bundle version info. 
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, 'architecture_type': { 'description': ('The value of ProcessorArchitecture for the package. ' 'This is either -Intel or -ARM to add with renaming the ' 'package disk image'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # Check for Processor Architecture self.env['architecture_type'] = option_xml.findtext('ProcessorArchitecture') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) self.output("architecture_type: {}".format(self.env['architecture_type'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise 
ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = (root.findtext ('./InstallerProperties/Property[@name=\'ProductVersion\']')) self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file 
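                    # (the .pimx is an XML manifest inside the payload zip; its Assets
                    # entries are walked below to locate the bundle's Info.plist)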
pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" .format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except 
zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) # pylint: disable = too-many-branches, too-many-statements def parse_app_json(self, load_json): ''' Read in values from app_json ''' # We'll override this later if needed self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'AICY': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InCopy' elif self.env['sap_code'] == 'CHAR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Character-Animator.application' elif self.env['sap_code'] == 'DRWV': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.dreamweaver-18.1' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' elif self.env['sap_code'] == 'FLPR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Animate-2021.application' elif self.env['sap_code'] == 'IDSN': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InDesign' elif self.env['sap_code'] == 'ILST': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.illustrator' elif self.env['sap_code'] == 'KBRG': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge11' elif self.env['sap_code'] == 'LTRM': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.LightroomClassicCC7' self.env['vers_compare_key'] = 'CFBundleVersion' elif self.env['sap_code'] == 'PHSP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Photoshop' elif self.env['sap_code'] == 'SBSTA': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.adobe-substance-3d-sampler' elif self.env['sap_code'] == 'SBSTD': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.substance-3d-designer' elif self.env['sap_code'] == 'SBSTP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Substance-3D-Painter' elif self.env['sap_code'] == 'SPRK': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.xd' elif self.env['sap_code'] == 'STGR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.stager' else: raise ProcessorError("Checking app_json for version details but sap code {}, " "is not within the known list of apps which we know to " "check via their Application.json".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to 
provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Set Processor Architecture info if self.env['architecture_type'] == "x64": pkginfo['supported_architectures'] = [ 'x86_64', ] self.env['architecture_type'] = '-Intel' elif self.env['architecture_type'] == "arm64": pkginfo['supported_architectures'] = [ 'arm64', ] self.env['architecture_type'] = '-ARM' # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2021Versioner() Adobe 2021/Adobe2021Importer.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
    For more information or support, please utilise the following resources:

            http://www.datajar.co.uk


DESCRIPTION

Imports Adobe 2021 titles found in running users ~/Downloads

'''


# Standard Imports
from __future__ import absolute_import
from __future__ import print_function
import argparse
import glob
import os
import subprocess
import sys


# Version
__version__ = '1.2'


# Functions
def main():
    '''
    Look within DOWNLOADS_PATH for Adobe*2021* items, add to adobe_folders list if found
    '''

    # Progress notification
    print("Looking for {} folders ...".format(os.path.join(DOWNLOADS_PATH, 'Adobe*2021*')))

    # Create empty list
    adobe_folders = []

    # Look within DOWNLOADS_PATH for Adobe*2021 items, add to adobe_folders list if found
    for some_item in os.listdir(DOWNLOADS_PATH):
        some_path = os.path.join(DOWNLOADS_PATH, some_item)
        if os.path.isdir(some_path):
            if some_item.startswith('Adobe') and '2021' in (some_item):
                adobe_folders.append(some_item)

    # If no folders are found, exit
    if not adobe_folders:
        print("No Adobe*2021 folders found in {}, exiting...".format(DOWNLOADS_PATH))
        sys.exit(1)

    # If 1 or more folders are found, notify and proceed.
    if len(adobe_folders) == 1:
        print("1 Adobe 2021 folder found, creating recipe list...")
    else:
        print("{} Adobe 2021 folders found, creating recipe list...".format(len(adobe_folders)))

    # Check for pkg's
    pkg_checker(sorted(adobe_folders))


def pkg_checker(adobe_folders):
    '''
    Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do
    '''

    # Progress notification
    print("Looking for pkgs...")

    # count var
    found_pkgs = 0

    # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,
    for adobe_folder in adobe_folders:

        # var declaration
        install_pkg = None
        uninstall_pkg = None
        adobe_build_folder_path = os.path.join(DOWNLOADS_PATH, adobe_folder, 'Build')

        # Look for *_Install.pkg
        try:
            install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]
            print("Found {}...".format(install_pkg))
        except IndexError:
            print("Cannot find *_Install.pkg within: {}...".format(adobe_build_folder_path))

        # Look for *_Uninstall.pkg
        try:
            uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Uninstall.pkg'))[0]
            print("Found {}...".format(uninstall_pkg))
        except IndexError:
            print("Cannot find *_Uninstall.pkg within: {}...".format(adobe_build_folder_path))

        # If we can find both *_Install.pkg and *_Uninstall.pkg, add to ADOBE_LIST
        if install_pkg and uninstall_pkg:
            # Increment count
            found_pkgs += 1
            # Append to ADOBE_LIST
            create_list(adobe_folder, found_pkgs)
        else:
            print("Cannot find both an *_Install.pkg and *_Uninstall.pkg for {}... "
                  "Skipping...".format(adobe_folder))

    # If we did not find any pkg pairs to import
    if found_pkgs == 0:
        print("ERROR: No Adobe 2021 pkg pairs found, exiting...")
        sys.exit(1)
    # Else, run the recipe list ADOBE_LIST
    else:
        run_list()


def create_list(adobe_folder, found_pkgs):
    '''
    Create recipe list
    '''

    # Create an empty file at ADOBE_LIST, if this is the 1st found pkg
    if found_pkgs == 1:
        open(ADOBE_LIST, 'w').close()

    # var declaration
    library_dir = os.path.expanduser('~/Library/')
    override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \
                                 adobe_folder + '.' \
                                 + RECIPE_TYPE + '.recipe')
    override_name = 'local.' + RECIPE_TYPE + '.' \
+ adobe_folder # If we cannot find the override if not os.path.isfile(override_path): print("Skipping {}, as cannot find override...".format(override_path)) return # Append to ADOBE_LIST list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): ''' Run recipe list ''' # Notify we're starting print("Running recipe_list: `{}`".format(ADOBE_LIST)) print() # The subprocess command cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, '--report-plist', REPORT_PATH] # Notify what command we're about to run. print('Running `{}`...'.format(cmd_args)) # Run the command subprocess.call(cmd_args) if __name__ == '__main__': # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2021_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2021_report.plist') # Call main def main() Adobe 2020/Adobe2020Versioner.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2020Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2020Versioner'] __version__ = ['1.4.1'] # Class def class Adobe2020Versioner(Processor): ''' Parses generated Adobe Admin Console 2020 pkgs for detailed application path and bundle version info. 
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try 
to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" 
.format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) def parse_app_json(self, load_json): ''' Read in values from app_json ''' # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'KBRG': self.env['app_version'] = 
load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge10' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' else: raise ProcessorError("Checking app_json for version details but sap code {}," "is neither ESHR nor KBRG".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2020Versioner() Adobe 2020/Adobe2020Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe 2020 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('2020'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*2020 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe 2020 folder found, creating recipe list...') else: print('%s Adobe 2020 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): ''' Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' + adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) return list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2020_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2020_report.plist') main() Adobe Admin Console Packages/AdobeAdminConsolePackagesImporter.py METASEP
#!/usr/local/autopkg/python
# pylint: disable = invalid-name
'''
Copyright (c) 2022, dataJAR Ltd.  All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
        * Redistributions of source code must retain the above copyright
          notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above copyright
          notice, this list of conditions and the following disclaimer in the
          documentation and/or other materials provided with the distribution.
        * Neither data JAR Ltd nor the names of its contributors may be used to
          endorse or promote products derived from this software without specific
          prior written permission.
    THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY
    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SUPPORT FOR THIS PROGRAM
    This program is distributed 'as is' by DATA JAR LTD.
    For more information or support, please utilise the following resources:
            http://www.datajar.co.uk

DESCRIPTION

Imports Adobe Admin Console Packages

'''

# Standard Imports
import argparse
import glob
import os
import plistlib
import subprocess
import sys
import yaml

# pylint: disable = import-error
from CoreFoundation import CFPreferencesCopyAppValue


# Version
__version__ = '1.0'


# Functions
def main():
    '''
    Check passed arguments before proceeding
    '''

    # Set up argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('type', type=str, help="Recipe type, for example: \"munki\" or \"jss\"")
    arg_parser = parser.parse_args()

    # Retrieve passed arguments, and assign to variables
    recipe_type = arg_parser.type.lower()
    packages_path = os.path.expanduser('~/Downloads/')

    # Check that packages_path exists
    if not os.path.exists(packages_path):
        print(f"ERROR: Cannot locate directory, {packages_path}... exiting...")
        sys.exit(1)

    # Check that packages_path is a directory
    if not os.path.isdir(packages_path):
        print(f"ERROR: {packages_path} is not a directory... exiting...")
        sys.exit(1)

    # Check for Adobe* dirs
    look_for_dirs(packages_path, recipe_type)


def look_for_dirs(packages_path, recipe_type):
    '''
    Look for dirs starting with Adobe*, in packages_path
    '''

    # Progress notification
    print(f"Looking in {packages_path} for Adobe* folders ...")

    # Create empty list
    adobe_folders = []

    # Look within packages_path for Adobe* items, add to adobe_folders list if found
    for some_item in os.listdir(packages_path):
        some_path = os.path.join(packages_path, some_item)
        if os.path.isdir(some_path):
            if some_item.startswith('Adobe'):
                adobe_folders.append(some_item)

    # If no folders are found, exit
    if not adobe_folders:
        print(f"No Adobe* folders found in {packages_path}, exiting...")
        sys.exit(1)

    # If 1 or more folders are found, notify and proceed.
    if len(adobe_folders) == 1:
        print("1 Adobe folder found...")
    else:
        print(f"{len(adobe_folders)} Adobe folders found...")

    # Get the override_dirs
    try:
        override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',
                                                  'com.github.autopkg').split()
    except AttributeError:
        override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',
                                     'RecipeOverrides').split()
    print(f"Override dirs: {override_dirs}")

    # Check for pkg's
    pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)


def pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):
    '''
    Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do
    '''

    # Progress notification
    print("Looking for pkgs...")

    # Count var
    found_pkgs = 0

    # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,
    for adobe_folder in adobe_folders:

        # Var declaration
        install_pkg = None
        uninstall_pkg = None
        adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')
        recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')
        report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')

        # Progress notification
        print(f"Checking {adobe_build_folder_path}...")

        if not os.path.isdir(adobe_build_folder_path):
            print(f"No Build dir at {adobe_build_folder_path}... skipping...")
        else:
            print(f"Found Build dir at {adobe_build_folder_path}...")
            # Look for *_Install.pkg
            try:
                install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]
                print(f"Found {install_pkg}...")
            except IndexError:
                print(f"Cannot find *_Install.pkg within: {adobe_build_folder_path}...")

            # Look for *_Uninstall.pkg
            try:
                uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,
                                                       '*_Uninstall.pkg'))[0]
                print(f"Found {uninstall_pkg}...")
            except IndexError:
                print(f"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...")

            # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path
            if install_pkg and uninstall_pkg:
                # Increment count
                found_pkgs += 1
                # Append to recipe_list_path
install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs\n try:\n override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',\n 'com.github.autopkg').split()\n except AttributeError:\n override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',\n 'RecipeOverrides').split()\n print(f\"Override dirs: {override_dirs}\")\n\n # Check for pkg's\n pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)\n\n\ndef pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):\n '''\n Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do\n '''\n\n # Progress notification\n print(\"Looking for pkgs...\")\n\n # Count var\n found_pkgs = 0\n\n # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,\n for adobe_folder in adobe_folders:\n\n # Var declaration\n 
install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list\n run_list(recipe_list_path, report_path)\n\n\ndef create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type):\n '''\n Create recipe list\n '''\n\n # Var declaration\n override_path = None\n\n # Look for recipes in override_dirs\n for override_dir in override_dirs:\n recipe_files = os.listdir(override_dir)\n for recipe_file in recipe_files:\n if recipe_file.startswith(adobe_folder) and recipe_type in recipe_file:\n override_path = os.path.join(override_dir, recipe_file)\n\n if not override_path:\n # Return when we cannot find a matching override\n print(f\"Cannot find override starting with: {adobe_folder}, skipping...\")\n return\n print(f\"Found override at: {override_path}, proceeding...\")\n\n # Create an empty file at recipe_list_path, if this is the 1st found pkg\n if found_pkgs == 1:\n with open(recipe_list_path, 'w', encoding='utf-8') as new_file:\n new_file.write('')\n\n # Retrieve override name from file\n # Borrowed with <3 from:\n # https://github.com/autopkg/autopkg/blob/405c913deab15042819e2f77f1587a805b7c1ada/Code/autopkglib/__init__.py#L341-L359\n if override_path.endswith(\".yaml\"):\n try:\n # try to read it as yaml\n with open (override_path, 'rb') as read_file:", "type": "common" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... 
exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs\n try:\n override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',\n 'com.github.autopkg').split()\n except AttributeError:\n override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',\n 'RecipeOverrides').split()\n print(f\"Override dirs: {override_dirs}\")\n\n # Check for pkg's\n pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)\n\n\ndef pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):\n '''\n Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do\n '''\n\n # Progress notification\n print(\"Looking for pkgs...\")\n\n # Count var\n found_pkgs = 0\n\n # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,\n for adobe_folder in adobe_folders:\n\n # Var declaration\n install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... 
skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list\n run_list(recipe_list_path, report_path)\n\n\ndef create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type):\n '''\n Create recipe list\n '''\n\n # Var declaration\n override_path = None\n\n # Look for recipes in override_dirs\n for override_dir in override_dirs:\n recipe_files = os.listdir(override_dir)\n for recipe_file in recipe_files:\n if recipe_file.startswith(adobe_folder) and recipe_type in recipe_file:\n override_path = os.path.join(override_dir, recipe_file)\n\n if not override_path:\n # Return when we cannot find a matching override\n print(f\"Cannot find override starting with: {adobe_folder}, skipping...\")\n return\n print(f\"Found override at: {override_path}, proceeding...\")\n\n # Create an empty file at recipe_list_path, if this is the 1st found pkg\n if found_pkgs == 1:\n with open(recipe_list_path, 'w', encoding='utf-8') as new_file:\n new_file.write('')\n\n # Retrieve override name from file\n # Borrowed with <3 from:\n # https://github.com/autopkg/autopkg/blob/405c913deab15042819e2f77f1587a805b7c1ada/Code/autopkglib/__init__.py#L341-L359\n if override_path.endswith(\".yaml\"):\n try:\n # try to read it as yaml\n with open (override_path, 'rb') as read_file:\n recipe_dict = yaml.load(read_file, Loader=yaml.FullLoader)\n override_name = recipe_dict[\"Identifier\"]\n # pylint: disable = broad-except\n except Exception as err_msg:\n print(f\"ERROR: yaml error for {override_path}: {err_msg}\")\n return\n try:\n # try to read it as a plist\n with open (override_path, 'rb') as read_file:", "type": "common" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs\n try:\n override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',\n 'com.github.autopkg').split()\n except AttributeError:\n override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',\n 'RecipeOverrides').split()\n print(f\"Override dirs: {override_dirs}\")\n\n # Check for pkg's\n pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)\n\n\ndef pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):\n '''\n Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do\n '''\n\n # Progress notification\n print(\"Looking for pkgs...\")\n\n # Count var\n found_pkgs = 0\n\n # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,\n for adobe_folder in adobe_folders:\n\n # Var declaration\n 
install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list\n run_list(recipe_list_path, report_path)\n\n\ndef create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type):\n '''\n Create recipe list\n '''\n\n # Var declaration\n override_path = None\n\n # Look for recipes in override_dirs\n for override_dir in override_dirs:\n recipe_files = os.listdir(override_dir)\n for recipe_file in recipe_files:\n if recipe_file.startswith(adobe_folder) and recipe_type in recipe_file:\n override_path = os.path.join(override_dir, recipe_file)\n\n if not override_path:\n # Return when we cannot find a matching override\n print(f\"Cannot find override starting with: {adobe_folder}, skipping...\")\n return\n print(f\"Found override at: {override_path}, proceeding...\")\n\n # Create an empty file at recipe_list_path, if this is the 1st found pkg\n if found_pkgs == 1:\n with open(recipe_list_path, 'w', encoding='utf-8') as new_file:\n new_file.write('')\n\n # Retrieve override name from file\n # Borrowed with <3 from:\n # https://github.com/autopkg/autopkg/blob/405c913deab15042819e2f77f1587a805b7c1ada/Code/autopkglib/__init__.py#L341-L359\n if override_path.endswith(\".yaml\"):\n try:\n # try to read it as yaml\n with open (override_path, 'rb') as read_file:\n recipe_dict = yaml.load(read_file, Loader=yaml.FullLoader)\n override_name = recipe_dict[\"Identifier\"]\n # pylint: disable = broad-except\n except Exception as err_msg:\n print(f\"ERROR: yaml error for {override_path}: {err_msg}\")\n return\n try:\n # try to read it as a plist\n with open (override_path, 'rb') as read_file:\n recipe_dict = plistlib.load(read_file)\n override_name = recipe_dict[\"Identifier\"]\n # pylint: disable = broad-except\n except Exception as err_msg:\n print(f\"ERROR: plist error for {override_path}: {err_msg}\")\n return\n\n print(f\"Adding {override_path}, to 
{recipe_list_path} with identifier: {override_name}...\")\n\n # Append to recipe_list_path\n with open(recipe_list_path, 'a+', encoding='utf-8') as list_file:\n list_file.write(override_name + '\\n')\n\n\ndef run_list(recipe_list_path, report_path):\n '''\n Run recipe list\n '''\n\n # Check that the recipe_list file has content before proceeding\n with open (recipe_list_path, encoding='utf-8') as recipe_list_file:\n content_test = recipe_list_file.readlines()\n if not content_test:\n print(f\"{recipe_list_path} is empty, no overrides found... exiting ...\")\n sys.exit(1)\n\n # Notify we're starting\n print(f\"Running recipe_list: `{recipe_list_path}`\")\n\n # The subprocess command\n cmd_args = ['/usr/local/bin/autopkg', 'run', '-vv', '--recipe-list', recipe_list_path,\n '--report-plist', report_path, '--ignore-parent-trust-verification-errors']\n\n # Notify what command we're about to run.\n print(f\"Running: `{cmd_args}`...\")\n\n # Run the command\n subprocess.call(cmd_args)\n\n\n\nif __name__ == '__main__':\n\n # Gimme some main", "type": "commited" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
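# ---------------------------------------------------------------------------
# Usage note (added for clarity; the script filename below is illustrative and
# not taken from the source). The script takes one positional argument, the
# recipe type, and expects the Adobe Admin Console "Managed Package" folders
# to already be unpacked into ~/Downloads/:
#
#   /usr/local/autopkg/python import_adobe_admin_console_packages.py munki
#   /usr/local/autopkg/python import_adobe_admin_console_packages.py jss
#
# For each matching override it finds, it writes the override identifier to
# ~/Downloads/adobe_admin_console_recipes_list.txt and then runs, roughly:
#
#   /usr/local/bin/autopkg run -vv \
#       --recipe-list ~/Downloads/adobe_admin_console_recipes_list.txt \
#       --report-plist ~/Downloads/adobe_admin_console_recipes_report.plist \
#       --ignore-parent-trust-verification-errors
# ---------------------------------------------------------------------------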
install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... 
exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs\n try:\n override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',\n 'com.github.autopkg').split()\n except AttributeError:\n override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',\n 'RecipeOverrides').split()\n print(f\"Override dirs: {override_dirs}\")\n\n # Check for pkg's\n pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)\n\n\ndef pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):\n '''\n Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do\n '''\n\n # Progress notification\n print(\"Looking for pkgs...\")\n\n # Count var\n found_pkgs = 0\n\n # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,\n for adobe_folder in adobe_folders:\n\n # Var declaration\n install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... 
skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list\n run_list(recipe_list_path, report_path)\n\n\ndef create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type):\n '''\n Create recipe list\n '''\n\n # Var declaration\n override_path = None\n\n # Look for recipes in override_dirs", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... 
exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nImports Adobe Admin Console Packages\n\n'''\n\n# Standard Imports\nimport argparse\nimport glob\nimport os\nimport plistlib\nimport subprocess\nimport sys\nimport yaml\n\n# pylint: disable = import-error\nfrom CoreFoundation import CFPreferencesCopyAppValue\n\n\n# Version\n__version__ = '1.0'\n\n\n# Functions\ndef main():\n '''\n Check passed arguments before proceeding\n '''\n\n # Setup arparse\n parser = argparse.ArgumentParser()\n parser.add_argument('type', type=str, help=\"Recipe type, for example: \\\"munki\\\" or \\\"jss\\\"\")\n arg_parser = parser.parse_args()\n\n # Retrieve passed arguments, and assign to variables\n recipe_type = arg_parser.type.lower()\n packages_path = os.path.expanduser('~/Downloads/')\n\n # Check that packages_path exists\n if not os.path.exists(packages_path):\n print(f\"ERROR: Cannot locate directory, {packages_path}... exiting...\")\n sys.exit(1)\n\n # Check that packages_path is a directory\n if not os.path.isdir(packages_path):\n print(f\"ERROR: {packages_path} is a not a directory... exiting...\")\n sys.exit(1)\n\n # Check for Adobe* dirs\n look_for_dirs(packages_path, recipe_type)\n\n\ndef look_for_dirs(packages_path, recipe_type):\n '''\n Look for dirs starting with Adobe*, in packages_path\n '''\n\n # Progress notification\n print(f\"Looking in {packages_path} for Adobe* folders ...\")\n\n # Create empty list\n adobe_folders = []\n\n # Look within packages_path for Adobe* items, add to adobe_folders list if found\n for some_item in os.listdir(packages_path):\n some_path = os.path.join(packages_path, some_item)\n if os.path.isdir(some_path):\n if some_item.startswith('Adobe'):\n adobe_folders.append(some_item)\n\n # If no folders are found, exit\n if not adobe_folders:\n print(f\"No Adobe* folders found in {packages_path}, exiting...\")\n sys.exit(1)\n\n # If 1 or more folders are found, notify and proceed.\n if len(adobe_folders) == 1:\n print(\"1 Adobe folder found...\")\n else:\n print(f\"{len(adobe_folders)} Adobe folders found...\")\n\n # Get the override_dirs\n try:\n override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS',\n 'com.github.autopkg').split()\n except AttributeError:\n override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg',\n 'RecipeOverrides').split()\n print(f\"Override dirs: {override_dirs}\")\n\n # Check for pkg's\n pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)\n\n\ndef pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type):\n '''\n Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do\n '''\n\n # Progress notification\n print(\"Looking for pkgs...\")\n\n # Count var\n found_pkgs = 0\n\n # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg,\n for adobe_folder in adobe_folders:\n\n # Var declaration\n 
install_pkg = None\n uninstall_pkg = None\n adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build')\n recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt')\n report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist')\n\n # Progress notification\n print(f\"Checking {adobe_build_folder_path}...\")\n\n if not os.path.isdir(adobe_build_folder_path):\n print(f\"No Build dir at {adobe_build_folder_path}... skipping...\")\n else:\n print(f\"Found Build dir at {adobe_build_folder_path}...\")\n # Look for *_Install.pkg\n try:\n install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0]\n print(f\"Found {install_pkg}...\")\n except IndexError:\n print(f\"Cannot find *_Install.pkg within: {adobe_build_folder_path}...\")\n\n # Look for *_Uninstall.pkg\n try:\n uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path,\n '*_Uninstall.pkg'))[0]\n print(f\"Found {uninstall_pkg}...\")\n except IndexError:\n print(\"Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...\")\n\n # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path\n if install_pkg and uninstall_pkg:\n # Increment count\n found_pkgs += 1\n # Append to recipe_list_path\n create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)\n else:\n print(f\"ERROR: Cannot find {adobe_folder}, these recipes need packages of the \"\n \"Managed Package variety, which include _Install and _Uninstall packages\"\n \".... skipping...\")\n\n # If we did not find any pkg pairs to import, exit\n if found_pkgs == 0:\n print(\"ERROR: No Adobe pkg pairs found, exiting...\")\n sys.exit(1)\n\n # Run recipe list\n run_list(recipe_list_path, report_path)\n\n\ndef create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type):\n '''\n Create recipe list\n '''\n\n # Var declaration\n override_path = None\n\n # Look for recipes in override_dirs\n for override_dir in override_dirs:\n recipe_files = os.listdir(override_dir)\n for recipe_file in recipe_files:\n if recipe_file.startswith(adobe_folder) and recipe_type in recipe_file:\n override_path = os.path.join(override_dir, recipe_file)\n\n if not override_path:\n # Return when we cannot find a matching override\n print(f\"Cannot find override starting with: {adobe_folder}, skipping...\")\n return\n print(f\"Found override at: {override_path}, proceeding...\")\n\n # Create an empty file at recipe_list_path, if this is the 1st found pkg\n if found_pkgs == 1:\n with open(recipe_list_path, 'w', encoding='utf-8') as new_file:\n new_file.write('')\n\n # Retrieve override name from file\n # Borrowed with <3 from:\n # https://github.com/autopkg/autopkg/blob/405c913deab15042819e2f77f1587a805b7c1ada/Code/autopkglib/__init__.py#L341-L359\n if override_path.endswith(\".yaml\"):\n try:\n # try to read it as yaml\n with open (override_path, 'rb') as read_file:\n recipe_dict = yaml.load(read_file, Loader=yaml.FullLoader)\n override_name = recipe_dict[\"Identifier\"]\n # pylint: disable = broad-except\n except Exception as err_msg:\n print(f\"ERROR: yaml error for {override_path}: {err_msg}\")\n return\n try:\n # try to read it as a plist\n with open (override_path, 'rb') as read_file:\n recipe_dict = plistlib.load(read_file)\n override_name = recipe_dict[\"Identifier\"]\n # pylint: disable = broad-except\n except Exception as err_msg:\n print(f\"ERROR: plist error for {override_path}: {err_msg}\")", "type": "random" } ]
[ " pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type)", " create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type)", " look_for_dirs(packages_path, recipe_type)", " run_list(recipe_list_path, report_path)", " recipe_dict = yaml.load(read_file, Loader=yaml.FullLoader)", " recipe_dict = plistlib.load(read_file)", " main()", " '''", "", " # Borrowed with <3 from:", "import subprocess", "import argparse", " for adobe_folder in adobe_folders:", " '*_Uninstall.pkg'))[0]", " for override_dir in override_dirs:", " try:", " return" ]
METASEP
41
autopkg__datajar-recipes
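The files that follow are AutoPkg processors, and they share one basic shape: a class derived from Processor (or DmgMounter) declaring input_variables and output_variables, doing its work in main() against self.env, and running via execute_shell() when invoked directly. The skeleton below is a minimal sketch of that shared pattern; the class name and its variables are placeholders invented for illustration, not one of the processors in this repo.

# Minimal AutoPkg processor sketch (illustrative; "ExampleEchoer" and its
# variables are hypothetical and do not appear in the recipes below).
from autopkglib import Processor, ProcessorError

__all__ = ["ExampleEchoer"]


class ExampleEchoer(Processor):
    """Echo an input variable back as an output variable."""
    description = __doc__
    input_variables = {
        "some_input": {"required": True, "description": "Value to echo back."},
    }
    output_variables = {
        "some_output": {"description": "The echoed value."},
    }

    def main(self):
        # Inputs arrive in self.env; results are published back into self.env.
        # ProcessorError is the conventional way to fail a recipe run.
        if not self.env.get("some_input"):
            raise ProcessorError("some_input is missing or empty")
        self.env["some_output"] = self.env["some_input"]
        self.output("some_output: {}".format(self.env["some_output"]))


if __name__ == "__main__":
    PROCESSOR = ExampleEchoer()
    PROCESSOR.execute_shell()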
autopkg__datajar-recipes METASEP ngrok/ngrokVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for ngrokVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['ngrokVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class ngrokVersioner(Processor): ''' Returns the version from the ngrok binary Raising if the key is not found. 
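    Example of the parse performed below, assuming the binary's '-v' output
    has the form "ngrok version X.Y.Z" (the token layout is an assumption;
    the path shown is illustrative only):

        raw = subprocess.check_output(['/usr/local/bin/ngrok', '-v'])
        version = raw.split()[2].decode('utf-8')   # e.g. '3.3.1'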
''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the ngrok binary.'), }, } output_variables = { 'version': { 'description': ('Version of the ngrok binary.'), }, } def main(self): ''' See docstring for ngrokVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ngrok.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "ngrok binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access ngrok binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = ngrokVersioner() VMware Fusion 8/VMwareFusion8URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion8URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '8' # lock version in class VMwareFusion8URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s." 
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion8URLProvider() processor.execute_shell() VMware Fusion 12/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." 
), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. if os.path.exists(dest_item) and overwrite: try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = Copier() PROCESSOR.execute_shell() VMware Fusion 11/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." ), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. 
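        # The check below only clears dest_item when an 'overwrite' flag is
        # truthy; 'overwrite' is not defined in this method or declared in
        # input_variables, so it is assumed to be supplied by the calling
        # scope. Directories (other than symlinks) go via shutil.rmtree,
        # everything else via os.unlink, before ditto recreates the item.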
if os.path.exists(dest_item) and overwrite: try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = Copier() PROCESSOR.execute_shell() VMware Fusion 10/VMwareFusion10URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion10URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '10' # lock version in class VMwareFusion10URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s." 
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion10URLProvider() processor.execute_shell() Traffic/TrafficXMLParser.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=import-error, too-few-public-methods """See docstring for TrafficXMLParser class""" from __future__ import absolute_import import os from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["TrafficXMLParser"] __version__ = '1.1' class TrafficXMLParser(Processor): """Parses /META-INF/AIR/application.xml from the copied .air installer""" description = __doc__ input_variables = { "app_xml": { "required": True, "description": "Path to the application.xml." }, } output_variables = { "bundleid": { "description": "Bundled ID.", }, "version": { "description": "The value of CFBundleShortVersionString for the app bundle." 
}, } def main(self): """Parses /META-INF/AIR/application.xml from the copied .air installer""" if not os.path.exists(self.env["app_xml"]): raise ProcessorError("application.xml not found at %s" % self.env["app_xml"]) else: tree = ElementTree.parse(self.env["app_xml"]) for b_id in tree.iterfind('{http://ns.adobe.com/air/application/24.0}id'): self.env["bundleid"] = b_id.text for ver_num in tree.iterfind('{http://ns.adobe.com/air/application/24.0}versionNumber'): self.env["version"] = ver_num.text self.output("bundleid: %s" % self.env["bundleid"]) self.output("version: %s" % self.env["version"]) if __name__ == "__main__": PROCESSOR = TrafficXMLParser() Shared Processors/TempFileFinder.py METASEP #!/usr/local/autopkg/python # # Copyright 2013 Jesse Peterson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Temp as waiting PR merging and release including PR - https://github.com/autopkg/autopkg/pull/742 # """See docstring for TempFileFinder class""" import os.path from glob import glob from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["TempFileFinder"] class TempFileFinder(DmgMounter): """Finds a filename for use in other Processors. Currently only supports glob filename patterns. Requires version 0.2.3. """ input_variables = { "pattern": { "description": "Shell glob pattern to match files by", "required": True, }, "find_method": { "description": ( "Type of pattern to match. Currently only " 'supported type is "glob" (also the default)' ), "default": "glob", "required": False, }, } output_variables = { "found_filename": {"description": "Full path of found filename"}, "dmg_found_filename": {"description": "DMG-relative path of found filename"}, "found_basename": {"description": "Basename of found filename"}, } description = __doc__ def globfind(self, pattern): """If multiple files are found the last alphanumerically sorted found file is returned""" glob_matches = glob(pattern, recursive=True) if len(glob_matches) < 1: raise ProcessorError("No matching filename found") glob_matches.sort() return glob_matches[-1] def main(self): pattern = self.env.get("pattern") method = self.env.get("find_method") if method != "glob": raise ProcessorError(f"Unsupported find_method: {method}") source_path = pattern # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) try: if dmg: # Mount dmg and copy path inside. 
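                # self.mount() attaches the dmg and returns its mount point;
                # the glob pattern is then re-rooted under that mount point
                # before globfind() returns the last alphanumerically sorted
                # match.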
mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with globbing match = self.globfind(source_path) self.env["found_filename"] = match self.output( f"Found file match: '{self.env['found_filename']}' from globbed '{source_path}'" ) if dmg and match.startswith(mount_point): self.env["dmg_found_filename"] = match[len(mount_point) :].lstrip("/") self.output( f"DMG-relative file match: '{self.env['dmg_found_filename']}'" ) if match.endswith('/'): self.env["found_basename"] = os.path.basename(match.rstrip("/")) else: self.env["found_basename"] = os.path.basename(match) self.output( f"Basename match: '{self.env['found_basename']}'" ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = TempFileFinder() PROCESSOR.execute_shell() Shared Processors/JSONFileReader.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for JSONFileReader class ''' # Standard Imports from __future__ import absolute_import import json import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['JSONFileReader'] __version__ = '1.0' # pylint: disable = too-few-public-methods class JSONFileReader(Processor): ''' Parses a JSON file, returning the value of the supplied key. Raising if the key is not found. 
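    Example (values invented for illustration): with json_path pointing at
    a file containing {"app_version": "1.2.3"} and json_key set to
    "app_version", the processor sets json_value to "1.2.3".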
''' description = __doc__ input_variables = { 'json_key': { 'required': True, 'description': ('Key to look for, and return the value of'), }, 'json_path': { 'required': True, 'description': ('Path to the JSON file'), }, } output_variables = { 'json_value': { 'description': ('Value of the JSON key'), }, } def main(self): ''' See docstring for JSONFileReader class ''' # Progress notification self.output("Looking for: {}".format(self.env['json_path'])) if os.path.isfile(self.env['json_path']): # Read in JSON file with open(self.env['json_path']) as json_file: # Try to parse json_path as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['json_path'], err_msg)) # Look for value of key json_key, raise if an issue try: self.env['json_value'] = load_json[self.env['json_key']] except KeyError: raise ProcessorError("Cannot find key {} within json file: {}" .format(self.env['json_key'], self.env['json_path'])) else: raise ProcessorError("Cannot access JSON file at path: {}" .format(self.env['json_path'])) self.output("json_value: {}".format(self.env['json_value'])) if __name__ == '__main__': PROCESSOR = JSONFileReader() Shared Processors/DistributionPkgInfo.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
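#
# The DistributionPkgInfo processor below lists a flat pkg's table of
# contents with /usr/bin/xar, extracts only the 'Distribution' entry, and
# then reads the product 'version' and pkg-ref 'id' attributes with
# ElementTree. A rough command-line equivalent of the extraction step
# (pkg name illustrative only):
#
#   /usr/bin/xar -tf Example_Install.pkg              # list the contents
#   /usr/bin/xar -xf Example_Install.pkg Distribution -C ./downloads
#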
# pylint: disable=import-error, too-few-public-methods """See docstring for DistributionPkgInfo class""" from __future__ import absolute_import from __future__ import print_function import os import subprocess from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["DistributionPkgInfo"] __version__ = '1.1.1' class DistributionPkgInfo(Processor): """Parses a distribution pkg to pull the info, other formats to be added later""" description = __doc__ input_variables = { "pkg_path": { "required": True, "description": ("Path to the Pkg.."), }, } output_variables = { "pkg_id": { "description": ("The package ID.."), }, "version": { "description": ("The version of the pkg from it's info"), }, } # pylint: disable=too-many-branches def main(self): """Cobbled together from various sources, should extract information from a Distribution pkg""" # Build dir as needed,pinched with <3 from: # https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/FlatPkgUnpacker.py#L72 # Extract pkg info, pinched with <3 from: # https://github.com/munki/munki/blob/master/code/client/munkilib/pkgutils.py#L374 self.env["abspkgpath"] = os.path.join(self.env["pkg_path"]) file_path = os.path.join(self.env["RECIPE_CACHE_DIR"], "downloads") cmd_toc = ['/usr/bin/xar', '-tf', self.env["abspkgpath"]] proc = subprocess.Popen(cmd_toc, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (toc, err) = proc.communicate() toc = toc.decode("utf-8") .strip().split('\n') if proc.returncode == 0: # Walk trough the TOC entries if not os.path.exists(file_path): os.mkdir(file_path) for toc_entry in [item for item in toc if item.startswith('Distribution')]: cmd_extract = ['/usr/bin/xar', '-xf', self.env["abspkgpath"], \ toc_entry, '-C', file_path] _ = subprocess.call(cmd_extract) else: raise ProcessorError("pkg not found at pkg_path") dist_path = os.path.join(file_path, "Distribution") version = None pkg_id = None if not os.path.exists(dist_path): raise ProcessorError("Cannot find Distribution") else: tree = ElementTree.parse(dist_path) _ = tree.getroot() try: for elem in tree.iter(tag='product'): version = elem.get("version") for elem in tree.iter(tag='pkg-ref'): pkg_id = elem.get("id") except ElementTree.ParseError as err: print(("Can't parse distribution file %s: %s" % ('dist_path', err.strerror))) if not pkg_id: raise ProcessorError("cannot get pkg_id") else: self.env["pkg_id"] = pkg_id if not version: raise ProcessorError("cannot get version") else: self.env["version"] = version os.remove(dist_path) if __name__ == '__main__': PROCESSOR = DistributionPkgInfo() MacTeX Ghostscript/GhostscriptVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for GhostscriptVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['GhostscriptVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class GhostscriptVersioner(Processor): ''' Returns the version from the Ghostscript binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the Ghostscript binary.'), }, } output_variables = { 'version': { 'description': ('Version of the Ghostscript binary.'), }, } def main(self): ''' See docstring for GhostscriptVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ghostscript.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "Ghostscript binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access Ghostscript binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = GhostscriptVersioner() BrowserStackLocal/BrowserStackLocalBinaryVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for BrowserStackLocalBinaryVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['BrowserStackLocalBinaryVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class BrowserStackLocalBinaryVersioner(Processor): ''' Returns the version from the BrowserStackLocal binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the BrowserStackLocal binary.'), }, } output_variables = { 'version': { 'description': ('Version of the BrowserStackLocal binary.'), }, } def main(self): ''' See docstring for BrowserStackLocalBinaryVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.browserstack.com/local-testing/binary-params # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '--version'] ).split()[3].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "BrowserStackLocal binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access BrowserStackLocal binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = BrowserStackLocalBinaryVersioner() Adobe CC 2019/AdobeCC2019Versioner.py METASEP #!/usr/bin/python # Copyright 2021 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
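# An illustrative, minimal sketch of the pattern shared by the
# GhostscriptVersioner and BrowserStackLocalBinaryVersioner processors above:
# run the binary with its version flag and pick one whitespace-separated
# token from stdout. The helper name is hypothetical and nothing in these
# recipes calls it; the flag and token index are the per-title assumptions
# (Ghostscript uses '-v' and token 2, BrowserStackLocal uses '--version'
# and token 3).
def _sketch_binary_version(binary_path, version_flag, token_index):
    '''Return one whitespace-separated token of a binary's version output.'''
    import os
    import subprocess
    if not os.path.isfile(binary_path):
        raise RuntimeError('Cannot access binary at path: {}'.format(binary_path))
    try:
        version_output = subprocess.check_output([binary_path, version_flag])
    except subprocess.CalledProcessError as run_err:
        raise RuntimeError('Failed to get version: {}'.format(run_err))
    return version_output.split()[token_index].decode('utf-8')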
# pylint: disable=import-error """See docstring for AdobeCC2019Versioner class""" from __future__ import absolute_import import glob import json import os import re import zipfile from xml.etree import ElementTree try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError __all__ = ['AdobeCC2019Versioner'] __version__ = ['1.2.1'] class AdobeCC2019Versioner(Processor): """Parses generated Adobe Admin Console CC 2019 pkgs for detailed application path and bundle version info""" description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): """Find the Adobe*_Install.pkg in the Downloads dir based on the name""" download_path = os.path.expanduser('~/Downloads') self.env['PKG'] = os.path.join(download_path, self.env['NAME'], \ 'Build', self.env['NAME'] + '_Install.pkg') self.output('pkg %s' % self.env['PKG']) self.env['uninstaller_pkg_path'] = glob.glob(os.path.join\ (os.path.dirname(self.env['PKG']), '*_Uninstall.pkg'))[0] self.process_installer() def process_installer(self): ''' Determine a pkginfo, version and jss inventory name from the created package. Inputs: PKG: Path to the pkg Outputs: app_json/proxy_xml: The path of the files that within the pkg's ''' install_lang = None option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output('Processing %s' % option_xml_path) option_xml = ElementTree.parse(option_xml_path) for hd_media in option_xml.findall('.//HDMedias/HDMedia'): if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = hd_media.findtext('TargetFolderName') if install_lang is None: for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = ribs_media.findtext('TargetFolderName') self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] is 'APRO': self.output('Installer is HyperDrive') self.output('app_json: %s' % self.env['app_json']) self.process_hd_installer() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = os.path.join(self.env['PKG'], 'Contents/Resources/Setup', \ self.env['target_folder'], 'proxy.xml') if not os.path.exists(self.env['proxy_xml']): raise ProcessorError('APRO selected, proxy.xml not found at: %s' \ % self.env['proxy_xml']) else: self.process_apro_installer() def process_apro_installer(self): ''' Process APRO installer - proxy_xml: Path to proxy_xml if pkg is APRO ''' self.output('Processing Acrobat installer') self.output('proxy_xml: %s' % 
self.env['proxy_xml']) tree = ElementTree.parse(self.env['proxy_xml']) root = tree.getroot() app_bundle_text = root.findtext\ ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']') app_bundle = app_bundle_text.split('/')[1] self.output('app_bundle: %s' % app_bundle) app_path_text = root.findtext('./InstallDir/Platform') self.output('app_path_text: %s' % app_path_text) app_path = app_path_text.split('/')[1] self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) app_version = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output('app_version: %s' % app_version) self.env['display_name'] = app_path + ' CC 2019' self.output('display_name: %s' % self.env['display_name']) self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % self.env['vers_compare_key']) app_bundle_id = 'com.adobe.Acrobat.Pro' self.output('app_bundle_id: %s' % app_bundle_id) self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) # pylint: disable=too-many-branches def process_hd_installer(self): ''' Process HD installer - app_json: Path to the Application JSON from within the PKG ''' #pylint: disable=too-many-locals, too-many-statements self.output('Processing HD installer') with open(self.env['app_json']) as json_file: load_json = json.load(json_file) # AppLaunch is not always in the same format, but is splittable if 'AppLaunch' in load_json: # Bridge CC is HD but does not have AppLaunch app_launch = load_json['AppLaunch'] self.output('app_launch: %s' % app_launch) app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] self.output('app_bundle: %s' % app_bundle) self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) if not app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' CC 2019' elif app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' 2019' else: self.env['display_name'] = app_path self.output('display_name: %s' % self.env['display_name']) zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output('zip_file: %s' % zip_file) zip_path = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], zip_file + '.zip') self.output('zip_path: %s' % zip_path) with zipfile.ZipFile(zip_path, mode='r') as myzip: with myzip.open(zip_file + '.pimx') as mytxt: txt = mytxt.read() tree = ElementTree.fromstring(txt) # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. 
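                # Note on what the loop below relies on: the .pimx pulled from
                # the payload zip is an XML manifest of staged assets. Each
                # Assets child whose 'target' attribute starts with
                # [INSTALLDIR] carries a 'source' path under [StagingFolder];
                # stripping that prefix and appending Contents/Info.plist
                # (inside the zip's '1' directory) locates the bundle whose
                # CFBundleShortVersionString - or CFBundleVersion for
                # Lightroom Classic (LTRM) - is used as the version.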
# Break when found .app/Contents/Info.plist for elem in tree.findall('Assets'): for i in elem.getchildren(): if i.attrib['target'].upper().startswith('[INSTALLDIR]'): bundle_location = i.attrib['source'] self.output('bundle_location: %s' % bundle_location) else: continue if not bundle_location.startswith('[StagingFolder]'): continue elif bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue else: bundle_location = bundle_location[16:] if bundle_location.endswith('.app'): zip_bundle = os.path.join('1', bundle_location, \ 'Contents/Info.plist') else: zip_bundle = os.path.join('1', bundle_location, \ app_bundle, 'Contents/Info.plist') try: with myzip.open(zip_bundle) as myplist: plist = myplist.read() data = load_plist(plist) if self.env['sap_code'] == 'LTRM': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = \ 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % \ self.env['vers_compare_key']) app_version = data[self.env['vers_compare_key']] app_bundle_id = data['CFBundleIdentifier'] self.output('app_bundle_id: %s' % app_bundle_id) self.output('staging_folder: %s' % bundle_location) self.output('staging_folder_path: %s' % zip_bundle) self.output('app_version: %s' % app_version) self.output('app_bundle: %s' % app_bundle) break except zipfile.BadZipfile: continue # Now we have the deets, let's use them self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) def create_pkginfo(self, app_bundle, app_bundle_id, app_version, installed_path): """Create pkginfo with found details Args: app_bundle (str): Bundle name app_version (str): Bundle version installed_path (str): The path where the installed item will be installed. """ self.env['jss_inventory_name'] = app_bundle self.env['pkg_path'] = self.env['PKG'] self.env['version'] = app_version pkginfo = { 'display_name': self.env['display_name'], 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': installed_path, 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': app_bundle_id, }] self.env['additional_pkginfo'] = pkginfo self.output('additional_pkginfo: %s' % self.env['additional_pkginfo']) if __name__ == '__main__': PROCESSOR = AdobeCC2019Versioner() Adobe CC 2019/AdobeCC2019Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed "as is" by DATA JAR LTD. DESCRIPTION Imports Adobe CC 2019 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('CC2019'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*CC2019 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe CC 2019 folder found, creating recipe list...') else: print('%s Adobe CC 2019 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): '''Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' 
+ adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') PARSER_ARGS = PARSER.parse_args() RECIPE_TYPE = PARSER_ARGS.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobecc2019_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobecc2019_report.plist') main() Adobe 2021/Adobe2021Versioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2021Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2021Versioner'] __version__ = ['1.4.10'] # Class def class Adobe2021Versioner(Processor): ''' Parses generated Adobe Admin Console 2021 pkgs for detailed application path and bundle version info. 
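
    A rough usage sketch (the NAME value is hypothetical and, in practice,
    both inputs come from a .munki or .jss recipe override rather than being
    set by hand):

        versioner = Adobe2021Versioner()
        versioner.env = {'NAME': 'AdobePhotoshop2021',
                         'MINIMUM_OS_VERSION': '10.15'}
        versioner.main()
        # env now also holds 'version', 'additional_pkginfo',
        # 'jss_inventory_name' and 'architecture_type'

    The matching Adobe*_Install.pkg and Adobe*_Uninstall.pkg are expected
    under ~/Downloads/<NAME>/Build/.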
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, 'architecture_type': { 'description': ('The value of ProcessorArchitecture for the package. ' 'This is either -Intel or -ARM to add with renaming the ' 'package disk image'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # Check for Processor Architecture self.env['architecture_type'] = option_xml.findtext('ProcessorArchitecture') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) self.output("architecture_type: {}".format(self.env['architecture_type'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise 
ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = (root.findtext ('./InstallerProperties/Property[@name=\'ProductVersion\']')) self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file 
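                    # The .pimx read below is an XML manifest of the assets
                    # staged by the payload zip. When it parses cleanly,
                    # read_info_plist() walks it (under the pimx_dir folder,
                    # '2' for -LearnPanel packages, otherwise '1') to find the
                    # bundled Info.plist; if parsing fails, parse_app_json()
                    # falls back to version fields in Application.json for the
                    # known SAP codes.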
pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" .format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except 
zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) # pylint: disable = too-many-branches, too-many-statements def parse_app_json(self, load_json): ''' Read in values from app_json ''' # We'll override this later if needed self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'AICY': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InCopy' elif self.env['sap_code'] == 'CHAR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Character-Animator.application' elif self.env['sap_code'] == 'DRWV': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.dreamweaver-18.1' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' elif self.env['sap_code'] == 'FLPR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Animate-2021.application' elif self.env['sap_code'] == 'IDSN': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InDesign' elif self.env['sap_code'] == 'ILST': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.illustrator' elif self.env['sap_code'] == 'KBRG': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge11' elif self.env['sap_code'] == 'LTRM': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.LightroomClassicCC7' self.env['vers_compare_key'] = 'CFBundleVersion' elif self.env['sap_code'] == 'PHSP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Photoshop' elif self.env['sap_code'] == 'SBSTA': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.adobe-substance-3d-sampler' elif self.env['sap_code'] == 'SBSTD': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.substance-3d-designer' elif self.env['sap_code'] == 'SBSTP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Substance-3D-Painter' elif self.env['sap_code'] == 'SPRK': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.xd' elif self.env['sap_code'] == 'STGR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.stager' else: raise ProcessorError("Checking app_json for version details but sap code {}, " "is not within the known list of apps which we know to " "check via their Application.json".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to 
provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Set Processor Architecture info if self.env['architecture_type'] == "x64": pkginfo['supported_architectures'] = [ 'x86_64', ] self.env['architecture_type'] = '-Intel' elif self.env['architecture_type'] == "arm64": pkginfo['supported_architectures'] = [ 'arm64', ] self.env['architecture_type'] = '-ARM' # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2021Versioner() Adobe 2021/Adobe2021Importer.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe 2021 titles found in running users ~/Downloads ''' # Standard Imports from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys # Version __version__ = '1.2' # Functions def main(): ''' Look within DOWNLOADS_PATH for Adobe*2021* items, add to adobe_folders list if found ''' # Progress notification print("Looking for {} folders ...".format(os.path.join(DOWNLOADS_PATH, 'Adobe*2021*'))) # Create empty list adobe_folders = [] # Look within DOWNLOADS_PATH for Adobe*2021 items, add to adobe_folders list if found for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and '2021' in (some_item): adobe_folders.append(some_item) # If no folders are found, exit if not adobe_folders: print("No Adobe*2021 folders found in {}, exiting...".format(DOWNLOADS_PATH)) sys.exit(1) # If 1 or moe folders are found, notify and proceed. if len(adobe_folders) == 1: print("1 Adobe 2021 folder found, creating recipe list...") else: print("{} Adobe 2021 folder found, creating recipe list...".format(len(adobe_folders))) # Check for pkg's pkg_checker(sorted(adobe_folders)) def pkg_checker(adobe_folders): ''' Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do ''' # Progress notification print("Looking for pkgs...") # count var found_pkgs = 0 # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg, for adobe_folder in adobe_folders: # var declaration install_pkg = None uninstall_pkg = None adobe_build_folder_path = os.path.join(DOWNLOADS_PATH, adobe_folder, 'Build') # Look for *_Install.pkg try: install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0] print("Found {}...".format(install_pkg)) except IndexError: print("Cannot find *_Install.pkg within: {}...".format(adobe_build_folder_path)) # Look for *_Uninstall.pkg try: uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0] print("Found {}...".format(uninstall_pkg)) except IndexError: print("Cannot find *_Uninstall.pkg within: {}...".format(adobe_build_folder_path)) # If we can find both *_Install.pkg and *_Uninstall.pkg, add to ADOBE_LIST if install_pkg and uninstall_pkg: # Increment count found_pkgs += 1 # Append to ADOBE_LIST create_list(adobe_folder, found_pkgs) else: print("Cannot find both an *_Install.pkg and *_Uninstall.pkg for {}... " "Skipping...".format(adobe_folder)) # If we did not find any pkg pairs to import if found_pkgs == 0: print("ERROR: No Adobe 2021 pkg pairs found, exiting...") sys.exit(1) # Else, run the recipe list ADOBE_LIST else: run_list() def create_list(adobe_folder, found_pkgs): ''' Create recipe list ''' # Create an empty file at ADOBE_List, if this is the 1st found pkg if found_pkgs == 1: open(ADOBE_LIST, 'w').close() # var declaration library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' 
+ adobe_folder # If we cannot find the override if not os.path.isfile(override_path): print("Skipping {}, as cannot find override...".format(override_path)) return # Append to ADOBE_LIST list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): ''' Run recipe list ''' # Notify we're starting print("Running recipe_list: `{}`".format(ADOBE_LIST)) print() # The subprocess command cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, '--report-plist', REPORT_PATH] # Notify what command we're about to run. print('Running `{}`...'.format(cmd_args)) # Run the command subprocess.call(cmd_args) if __name__ == '__main__': # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2021_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2021_report.plist') # Call main def main() Adobe 2020/Adobe2020Versioner.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2020Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2020Versioner'] __version__ = ['1.4.1'] # Class def class Adobe2020Versioner(Processor): ''' Parses generated Adobe Admin Console 2020 pkgs for detailed application path and bundle version info. 
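
    Compared with the Adobe2021Versioner above, this 2020 variant does not
    emit 'architecture_type', and its Application.json fallback only knows
    the ESHR (Dimension) and KBRG (Bridge) SAP codes. The additional_pkginfo
    it produces looks roughly like this (all values below are hypothetical):

        {'minimum_os_version': '10.14',
         'display_name': 'Adobe Photoshop 2020',
         'installs': [{'CFBundleShortVersionString': '21.2.0',
                       'CFBundleIdentifier': 'com.adobe.Photoshop',
                       'path': '/Applications/Adobe Photoshop 2020/Adobe Photoshop 2020.app',
                       'type': 'application',
                       'version_comparison_key': 'CFBundleShortVersionString'}]}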
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try 
to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" 
.format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) def parse_app_json(self, load_json): ''' Read in values from app_json ''' # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'KBRG': self.env['app_version'] = 
load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge10' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' else: raise ProcessorError("Checking app_json for version details but sap code {}," "is neither ESHR nor KBRG".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2020Versioner() Adobe 2020/Adobe2020Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe 2020 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('2020'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*2020 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe 2020 folder found, creating recipe list...') else: print('%s Adobe 2020 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): ''' Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' + adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) return list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2020_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2020_report.plist') main() Adobe Admin Console Packages/AdobeAdminConsolePackagesVersioner.py METASEP
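# A condensed, standalone sketch of the flow shared by the Adobe*Importer
# scripts above (CC 2019 / 2020 / 2021): find Adobe*<year>* folders in
# ~/Downloads that contain a Build/*_Install.pkg, collect the matching
# local.<type>.<folder> override names into a recipe list, then hand that
# list to 'autopkg run'. The function name and its year/recipe_type
# parameters are hypothetical; the paths and naming mirror the scripts above.
def _sketch_build_and_run_recipe_list(year, recipe_type):
    '''Write a recipe list for Adobe*<year>* folders in ~/Downloads, then run it.'''
    import glob
    import os
    import subprocess
    downloads = os.path.expanduser('~/Downloads/')
    overrides_dir = os.path.expanduser('~/Library/AutoPkg/RecipeOverrides/')
    list_path = os.path.join(downloads, 'adobe{}_list.txt'.format(year))
    report_path = os.path.join(downloads, 'adobe{}_report.plist'.format(year))
    recipe_names = []
    for item in sorted(os.listdir(downloads)):
        # Only Adobe folders for the requested year that hold an Install pkg
        if not (item.startswith('Adobe') and str(year) in item):
            continue
        if not glob.glob(os.path.join(downloads, item, 'Build', '*_Install.pkg')):
            continue
        # Only titles with a recipe override in place
        if os.path.isfile(os.path.join(overrides_dir,
                                       '{}.{}.recipe'.format(item, recipe_type))):
            recipe_names.append('local.{}.{}'.format(recipe_type, item))
    if not recipe_names:
        return
    with open(list_path, 'w') as list_file:
        list_file.write('\n'.join(recipe_names) + '\n')
    subprocess.call(['/usr/local/bin/autopkg', 'run', '-v',
                     '--recipe-list', list_path, '--report-plist', report_path])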
[ { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and 
\"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': 
{\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from 
AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = Path(__file__).cwd().as_posix()\n self.output(f\"aacp_parent_dir': {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... \"\n f\"exiting...\") from err_msg\n # If we found a match\n if 'aacp_matched_json' in self.env:\n self.process_matched_json(load_json)\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:\n self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']\n\n # Applications version comparison key\n self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']\n ['version_comparison_key'])\n self.output(f\"aacp_version_compare_key: {self.env['aacp_version_compare_key']}\")\n\n # Applications display name\n self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']\n self.output(f\"aacp_application_display_name: {self.env['aacp_application_display_name']}\")\n\n # Full path to the application bundle on disk, as per Terminal etc, not Finder\n self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']\n\n # Get description if missing:\n if not 
'aacp_application_description' in self.env:\n self.env['aacp_application_description'] = (self.env['aacp_matched_json']\n ['app_description'])\n self.output(f\"aacp_application_description: description missing, set from \"\n f\"aacp_matched_json: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get additional_blocking_applications\n if 'additional_blocking_applications' in self.env['aacp_matched_json']:\n for additional_blocking_application in (self.env['aacp_matched_json']\n ['additional_blocking_applications']):\n self.env['aacp_blocking_applications'].append(additional_blocking_application)\n self.env['aacp_blocking_applications'] = (\n sorted(set(self.env['aacp_blocking_applications'])))\n self.output(f\"aacp_blocking_applications updated: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # Now we have the deets, let's use them", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': 
{\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... 
#!/usr/local/autopkg/python
# pylint: disable = invalid-name
'''
Copyright (c) 2022, dataJAR Ltd. All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
        * Redistributions of source code must retain the above copyright
          notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above copyright
          notice, this list of conditions and the following disclaimer in the
          documentation and/or other materials provided with the distribution.
        * Neither data JAR Ltd nor the names of its contributors may be used to
          endorse or promote products derived from this software without specific
          prior written permission.
    THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY
    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SUPPORT FOR THIS PROGRAM
    This program is distributed 'as is' by DATA JAR LTD.
    For more information or support, please utilise the following resources:
        http://www.datajar.co.uk

DESCRIPTION

Generates installation information for Adobe Admin Console Packages
'''

# Standard Imports
import json
import os
import re
import xml
from pathlib import Path
from xml.etree import ElementTree


# AutoPkg imports
# pylint: disable = import-error
from autopkglib import (Processor,
                        ProcessorError)


# Define class
__all__ = ['AdobeAdminConsolePackagesVersioner']
__version__ = ['1.0']


# Class def
class AdobeAdminConsolePackagesVersioner(Processor):
    '''
    Parses generated Adobe Admin Console Packages to generate installation information.
    '''

    description = __doc__

    input_variables = {
    }

    output_variables = {
        'aacp_application_path': {
            'description': 'aacp_installdir_value after regex applied to get the path alone.',
        },
        'aacp_application_architecture_type': {
            'description': 'The architecture type for the title, either arm64 or x86_64',
        },
        'aacp_application_install_lang': {
            'description': 'The titles installation language.',
        },
        'aacp_application_json_path': {
            'description': 'Path to the titles Application.json file.',
        },
        'aacp_application_major_version': {
            'description': 'The major version of the title.',
        },
        'aacp_blocking_applications': {
            'description': 'Sorted set of the conflicting processes.',
        },
        'aacp_application_description': {
            'description': 'Short description of the title.',
        },
        'aacp_application_sap_code': {
            'description': 'The titles sap code.',
        },
        'aacp_install_pkg_path': {
            'description': 'Path to the Adobe*_Install.pkg.',
        },
        'aacp_json_path': {
            'description': 'Path to AdobeAutoPkgApplicationData.json.',
        },
        'aacp_matched_json': {
            'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '
                            '"aacp_application_sap_code" and "aacp_application_major_version".'),
        },
        'aacp_option_xml_path': {
            'description': 'Path to the titles optionXML.xml file.',
        },
        'aacp_parent_dir': {
            'description': 'Path to parent directory of this processor.',
        },
        'aacp_proxy_xml_path': {
            'description': 'Acrobat only, path to proxy.xml.',
        },
        'aacp_target_folder': {
            'description': 'The name of the folder within the pkg to check files for metadata.',
        },
        'aacp_uninstall_pkg_path': {
            'description': 'Path to the Adobe*_Uninstall.pkg.',
        },
        'additional_pkginfo': {
            'description':
                'Additional pkginfo fields extracted from the Adobe metadata.',
        },
        'version': {
            'description': 'The titles version.',
        }
    }


    def main(self):
        '''
        Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise
        if corresponding *_Uninstall.pkg is missing.
        '''

        # Progress notification
        self.output("Starting versioner process...")

        # Set packages_path
        self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')
        self.output(f"aacp_packages_path: {self.env['aacp_packages_path']}")

        # Check that packages_path exists
        if not os.path.exists(self.env['aacp_packages_path']):
            raise ProcessorError(f"ERROR: Cannot locate directory, "
                                 f"{self.env['aacp_packages_path']}... exiting...")

        # Check that packages_path is a directory
        if not os.path.isdir(self.env['aacp_packages_path']):
            raise ProcessorError(f"ERROR: {self.env['aacp_packages_path']} is not a "
                                 "directory... exiting...")

        # Path to Adobe*_Install.pkg
        self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],
                                                          self.env['NAME'], 'Build',
                                                          self.env['NAME'] + '_Install.pkg'))

        # Check that the path exists, raise if not
        if not os.path.exists(self.env['aacp_install_pkg_path']):
            raise ProcessorError(f"ERROR: Cannot find "
                                 f"{self.env['aacp_install_pkg_path']}... exiting...")
        self.output(f"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}")

        # Path to Adobe*_Uninstall.pkg
        self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],
                                                            self.env['NAME'], 'Build',
                                                            self.env['NAME'] + '_Uninstall.pkg'))

        # Check that the path exists, raise if not
        if not os.path.exists(self.env['aacp_uninstall_pkg_path']):
            raise ProcessorError(f"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these "
                                 f"recipes need packages of the Managed Package variety, which "
                                 f"include _Install and _Uninstall packages.... exiting...")
        self.output(f"aacp_uninstall_pkg_path: {self.env['aacp_uninstall_pkg_path']}")

        # Process the titles optionXML.xml
        self.process_optionxml_xml()


    def process_optionxml_xml(self):
        '''
        Process the titles optionXML.xml
        '''

        # Var declaration
        self.env['aacp_application_install_lang'] = None

        # Path to titles optionXML.xml
        self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],
                                                        'Contents', 'Resources', 'optionXML.xml')
        if not os.path.exists(self.env['aacp_option_xml_path']):
            raise ProcessorError(f"ERROR: Cannot find {self.env['aacp_option_xml_path']}... "
                                 "exiting...")
        self.output(f"aacp_option_xml_path: {self.env['aacp_option_xml_path']}")

        # Progress notification
        self.output(f"Processing: {self.env['aacp_option_xml_path']}...")

        # Try to parse option_xml, raise if an issue
        try:
            option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])
        except xml.etree.ElementTree.ParseError as err_msg:
            raise ProcessorError from err_msg

        # Check to see if HDMedia keys set
        for hd_media in option_xml.findall('.//HDMedias/HDMedia'):
            # If we have HDMedia, set vars
            if hd_media.findtext('MediaType') == 'Product':
                self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')
                self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')
                self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')
                self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')

        # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be None
        if not self.env['aacp_application_install_lang']:
            # Get vars for RIBS media
            for ribs_media in option_xml.findall('.//Medias/Media'):
                self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')
                self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')
                self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')
                self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')

        # Check for Processor Architecture
        self.env['aacp_application_architecture_type'] = (
            option_xml.findtext('ProcessorArchitecture'))
        if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:
            raise ProcessorError(f"architecture_type: "
                                 f"{self.env['aacp_application_architecture_type']},"
                                 f" is neither arm64 nor x64... exiting...")
        if self.env['aacp_application_architecture_type'] == 'x64':
            self.env['aacp_application_architecture_type'] = 'x86_64'

        # Display progress
        self.output(f"aacp_application_sap_code: {self.env['aacp_application_sap_code']}")
        self.output(f"aacp_target_folder: {self.env['aacp_target_folder']}")
        self.output(f"aacp_application_architecture_type: "
                    f"{self.env['aacp_application_architecture_type']}")
        self.output(f"aacp_application_install_lang: {self.env['aacp_application_install_lang']}")
        self.output(f"aacp_application_major_version: {self.env['aacp_application_major_version']}")

        # If we're looking at Acrobat, then we need to process things differently
        if self.env['aacp_application_sap_code'] == 'APRO':
            self.process_apro_installer()
        else:
            # Set application_json_path
            self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],
                                                                  'Contents/Resources/HD',
                                                                  self.env['aacp_target_folder'],
                                                                  'Application.json')
            # Process HD installer
            self.process_hd_installer()


    def process_apro_installer(self):
        '''
        Process APRO (Acrobat) installer
        '''

        # Progress notification
        self.output("Processing Acrobat installer")
        self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],
                                                        'Contents/Resources/Setup',
                                                        self.env['aacp_target_folder'],
                                                        'proxy.xml'))
        self.output(f"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}")

        # Try to parse proxy_xml, raise if an issue
        try:
            parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])
        except xml.etree.ElementTree.ParseError as err_msg:
            raise ProcessorError from err_msg

        # Get root of xml
        root = parse_xml.getroot()

        # Get app_version
        self.env['version'] = (root.findtext
                               ('./InstallerProperties/Property[@name=\'ProductVersion\']'))
        self.output(f"version: {self.env['version']}")

        # Set to []
        self.env['aacp_blocking_applications'] = []

        # 2nd part of process
        self.process_adobe_autopkg_application_data(None)


    def process_hd_installer(self):
        '''
        Process HD installer
        '''

        # Var declaration
        blocking_applications = []

        # Progress notification
        self.output(f"Processing {self.env['aacp_application_json_path']}...")

        # Read in app_json file
        with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:
            # Try to parse app_json as json, raise if an issue
            try:
                load_json = json.load(json_file)
            except json.JSONDecodeError as err_msg:
                raise ProcessorError from err_msg

            # Get description
            tag_lines = load_json["ProductDescription"]["Tagline"]["Language"]
            for tag_line in tag_lines:
                if tag_line['locale'] == self.env['aacp_application_install_lang']:
                    self.env['aacp_application_description'] = tag_line['value']
                    self.output(f"aacp_application_description: "
                                f"{self.env['aacp_application_description']}")

            # Get conflicting processes
            conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']
            for conflicting_process in conflicting_processes:
                blocking_applications.append(conflicting_process['ProcessDisplayName'])
            if blocking_applications:
                self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))
                self.output(f"aacp_blocking_applications: "
                            f"{self.env['aacp_blocking_applications']}")

            # 2nd part of process
            self.process_adobe_autopkg_application_data(load_json)


    def process_adobe_autopkg_application_data(self, load_json):
        '''
        Get more details from AdobeAutoPkgApplicationData.json
        '''

        # var declaration
        self.env['aacp_matched_json'] = None

        # Get this script's parent directory
        self.env['aacp_parent_dir'] = Path(__file__).cwd().as_posix()
        self.output(f"aacp_parent_dir: {self.env['aacp_parent_dir']}")

        # Get the path to AdobeAutoPkgApplicationData.json
        self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],
                                                  'AdobeAutoPkgApplicationData.json')
        self.output(f"aacp_json_path: {self.env['aacp_json_path']}")

        # Progress notification
        self.output(f"Processing {self.env['aacp_json_path']}...")

        # Read in AdobeAutoPkgApplicationData.json file
        with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:
            # Try to parse app_json as json, raise if an issue
            try:
                self.env['aacp_autopkg_json'] = json.load(json_file)
            except json.JSONDecodeError as err_msg:
                raise ProcessorError from err_msg

            # Get applications dict from the json
            for application_data in self.env['aacp_autopkg_json']:
                if application_data['sap_code'] == self.env['aacp_application_sap_code']:
                    for aacp_version_json in application_data['versions'].keys():
                        try:
                            if aacp_version_json == self.env['aacp_application_major_version']:
                                self.env['aacp_matched_json'] = (application_data['versions']
                                                                 [self.env['aacp_application_major_version']])
                                self.output(f"aacp_matched_json: {self.env['aacp_matched_json']}")
                        except KeyError as err_msg:
                            raise ProcessorError(f"Cannot find details for "
                                                 f"{self.env['aacp_application_sap_code']} "
                                                 f"with version: "
                                                 f"{self.env['aacp_application_major_version']},"
                                                 f" in {self.env['aacp_json_path']}... "
                                                 f"exiting...") from err_msg

        # If we found a match
        if 'aacp_matched_json' in self.env:
            self.process_matched_json(load_json)


    def process_matched_json(self, load_json):
        '''
        Get metadata with the aid of self.env['aacp_matched_json']
        '''
        # Applications version, if not APRO
        if not self.env['aacp_application_sap_code'] == 'APRO':
            self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]
            self.output(f"version: {self.env['version']}")

        # Applications bundle id
        self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']
        self.output(f"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}")

        # Applications minimum os, if APRO get from self.env['aacp_matched_json']
        if not self.env['aacp_application_sap_code'] == 'APRO':
            self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']
                                                                 ['minos_regex'],
                                                                 load_json['SystemRequirement']
                                                                 ['CheckCompatibility']
                                                                 ['Content'])[1])
            self.output(f"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}")
        else:
            self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']

        # Applications version comparison key
        self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']
                                                ['version_comparison_key'])
        self.output(f"aacp_version_compare_key: {self.env['aacp_version_compare_key']}")

        # Applications display name
        self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']
        self.output(f"aacp_application_display_name: {self.env['aacp_application_display_name']}")

        # Full path to the application bundle on disk, as per Terminal etc, not Finder
        self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']

        # Get description if missing:
        if not 'aacp_application_description' in self.env:
            self.env['aacp_application_description'] = (self.env['aacp_matched_json']
                                                        ['app_description'])
            self.output(f"aacp_application_description: description missing, set from "
                        f"aacp_matched_json: "
                        f"{self.env['aacp_application_description']}")

        # Get additional_blocking_applications
        if 'additional_blocking_applications' in self.env['aacp_matched_json']:
            for additional_blocking_application in (self.env['aacp_matched_json']
                                                    ['additional_blocking_applications']):
                self.env['aacp_blocking_applications'].append(additional_blocking_application)
            self.env['aacp_blocking_applications'] = (
                sorted(set(self.env['aacp_blocking_applications'])))
            self.output(f"aacp_blocking_applications updated: "
                        f"{self.env['aacp_blocking_applications']}")

        # Now we have the deets, let's use them
        self.create_pkginfo()


    def create_pkginfo(self):
        '''
        Create pkginfo with found details
        '''

        # var declaration
        pkginfo = {}

        # Set pkginfo variables
        if self.env['aacp_application_architecture_type']:
            pkginfo['supported_architectures'] = [self.env['aacp_application_architecture_type']]

        if self.env['aacp_application_description']:
            pkginfo['description'] = self.env['aacp_application_description']

        if self.env['aacp_application_display_name']:
            pkginfo['display_name'] = self.env['aacp_application_display_name']

        if self.env['aacp_blocking_applications']:
            pkginfo['blocking_applications'] = self.env['aacp_blocking_applications']

        if self.env['aacp_application_minimum_os']:
            pkginfo['minimum_os_version'] = self.env['aacp_application_minimum_os']

        # Create pkginfo installs array if missing
        #if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']:
        pkginfo['installs'] = [{
            'CFBundleIdentifier': self.env['aacp_application_bundle_id'],
            self.env['aacp_version_compare_key']: self.env['version'],
            'path': self.env['aacp_application_full_path'],
            'type': 'application',
            'version_comparison_key': self.env['aacp_version_compare_key']
        }]

        # Notify of additional_pkginfo
        self.env['additional_pkginfo'] = pkginfo
        self.output(f"additional_pkginfo: {self.env['additional_pkginfo']}")


if __name__ == '__main__':
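The capture above stops at the `if __name__ == '__main__':` guard, so the body of that guard is not part of the dumped file. As a hedged sketch only, following the usual AutoPkg processor convention rather than recovered text from this file, the guard would typically be completed with:

    # Assumed standard AutoPkg entry point (not present in the captured source)
    PROCESSOR = AdobeAdminConsolePackagesVersioner()
    PROCESSOR.execute_shell()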
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': 
{\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from 
AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = Path(__file__).cwd().as_posix()\n self.output(f\"aacp_parent_dir': {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n 
},\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... 
exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... \"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... \"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': 
{\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture'))\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64 nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from 
AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = Path(__file__).cwd().as_posix()\n self.output(f\"aacp_parent_dir': {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... \"\n f\"exiting...\") from err_msg\n # If we found a match\n if 'aacp_matched_json' in self.env:\n self.process_matched_json(load_json)\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:\n self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']\n\n # Applications version comparison key\n self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']\n ['version_comparison_key'])\n self.output(f\"aacp_version_compare_key: {self.env['aacp_version_compare_key']}\")\n\n # Applications display name\n self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']\n self.output(f\"aacp_application_display_name: {self.env['aacp_application_display_name']}\")\n\n # Full path to the application bundle on disk, as per Terminal etc, not Finder\n self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']\n\n # Get description if missing:\n if not 
'aacp_application_description' in self.env:\n self.env['aacp_application_description'] = (self.env['aacp_matched_json']\n ['app_description'])\n self.output(f\"aacp_application_description: description missing, set from \"\n f\"aacp_matched_json: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get additional_blocking_applications\n if 'additional_blocking_applications' in self.env['aacp_matched_json']:\n for additional_blocking_application in (self.env['aacp_matched_json']\n ['additional_blocking_applications']):\n self.env['aacp_blocking_applications'].append(additional_blocking_application)\n self.env['aacp_blocking_applications'] = (\n sorted(set(self.env['aacp_blocking_applications'])))\n self.output(f\"aacp_blocking_applications updated: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # Now we have the deets, let's use them\n self.create_pkginfo()\n\n\n def create_pkginfo(self):\n '''\n Create pkginfo with found details\n '''\n\n # var declaration\n pkginfo = {}\n\n # Set pkginfo variables\n if self.env['aacp_application_architecture_type']:\n pkginfo['supported_architectures'] = [self.env['aacp_application_architecture_type']]\n\n if self.env['aacp_application_description']:\n pkginfo['description'] = self.env['aacp_application_description']\n\n if self.env['aacp_application_display_name']:\n pkginfo['display_name'] = self.env['aacp_application_display_name']\n\n if self.env['aacp_blocking_applications']:\n pkginfo['blocking_applications'] = self.env['aacp_blocking_applications']\n\n if self.env['aacp_application_minimum_os']:\n pkginfo['minimum_os_version'] = self.env['aacp_application_minimum_os']\n\n # Create pkginfo is missing from installs array\n #if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']:\n pkginfo['installs'] = [{\n 'CFBundleIdentifier': self.env['aacp_application_bundle_id'],\n self.env['aacp_version_compare_key']: self.env['version'],", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom pathlib import Path\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesVersioner']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesVersioner(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',", "type": "random" } ]
[ " self.process_adobe_autopkg_application_data(load_json)", " self.create_pkginfo()", " self.process_optionxml_xml()", " self.process_apro_installer()", " self.process_matched_json(load_json)", " self.process_hd_installer()", " PROCESSOR = AdobeAdminConsolePackagesVersioner()", " self.process_adobe_autopkg_application_data(None)", " load_json = json.load(json_file)", " self.env['aacp_autopkg_json'] = json.load(json_file)", " # Try to parse proxy_xml, raise if an issue", "", " WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", " # Progress notification", " if not self.env['aacp_application_architecture_type'] in ['arm64', 'x64']:", " self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")", " }", " 'path': self.env['aacp_application_full_path'],", " }," ]
METASEP
41
autopkg__datajar-recipes
autopkg__datajar-recipes METASEP ngrok/ngrokVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for ngrokVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['ngrokVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class ngrokVersioner(Processor): ''' Returns the version from the ngrok binary Raising if the key is not found. 
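    Illustrative note (an addition, not part of the original recipe): the processor shells
    out to "<binary_path> -v" and publishes the third whitespace-separated token of that
    output as "version"; assuming the binary reports something like "ngrok version X.Y.Z",
    that token is the X.Y.Z version string.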
''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the ngrok binary.'), }, } output_variables = { 'version': { 'description': ('Version of the ngrok binary.'), }, } def main(self): ''' See docstring for ngrokVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ngrok.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "ngrok binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access ngrok binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = ngrokVersioner() VMware Fusion 8/VMwareFusion8URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion8URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '8' # lock version in class VMwareFusion8URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s." 
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion8URLProvider() processor.execute_shell() VMware Fusion 12/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." 
), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. if os.path.exists(dest_item) and overwrite: try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = Copier() PROCESSOR.execute_shell() VMware Fusion 11/DittoCopier.py METASEP #!/usr/local/autopkg/python # # Copyright 2010 Per Olofsson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """See docstring for Copier class""" import glob import os.path import shutil import subprocess from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["DittoCopier"] class DittoCopier(DmgMounter): """Copies source_path to destination_path. Uses the shell tool ditto in place of python's shutil.copy""" description = __doc__ input_variables = { "source_path": { "required": True, "description": ( "Path to a file or directory to copy. " "Can point to a path inside a .dmg which will be mounted. " "This path may also contain basic globbing characters such as " "the wildcard '*', but only the first result will be " "returned." ), }, "destination_path": {"required": True, "description": "Path to destination."} } output_variables = {} __doc__ = description def copy(self, source_item, dest_item): """Copies source_item to dest_item, overwriting if necessary""" # Remove destination if needed. 
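        # Note: "overwrite" is never assigned in this processor (it is not an input variable
        # and is not set inside copy()), so the guard below raises NameError whenever
        # dest_item already exists. A minimal sketch of the intended guard, assuming a
        # hypothetical "overwrite" input variable that is not part of the original recipe:
        #     overwrite = self.env.get("overwrite", True)
        #     if os.path.exists(dest_item) and overwrite:
        #         ...  # remove dest_item as in the block below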
if os.path.exists(dest_item) and overwrite: try: if os.path.isdir(dest_item) and not os.path.islink(dest_item): shutil.rmtree(dest_item) else: os.unlink(dest_item) except OSError as err: raise ProcessorError(f"Can't remove {dest_item}: {err.strerror}") # Copy file or directory. try: subprocess.run(['ditto', source_item, dest_item], stdout=subprocess.PIPE) self.output(f"Copied {source_item} to {dest_item}") except BaseException as err: raise ProcessorError(f"Can't copy {source_item} to {dest_item}: {err}") def main(self): source_path = self.env["source_path"] # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) self.output( f"Parsed dmg results: dmg_path: {dmg_path}, dmg: {dmg}, " f"dmg_source_path: {dmg_source_path}", verbose_level=2, ) try: if dmg: # Mount dmg and copy path inside. mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with glob.glob matches = glob.glob(source_path) if len(matches) == 0: raise ProcessorError( f"Error processing path '{source_path}' with glob. " ) matched_source_path = matches[0] if len(matches) > 1: self.output( f"WARNING: Multiple paths match 'source_path' glob '{source_path}':" ) for match in matches: self.output(f" - {match}") if [c for c in "*?[]!" if c in source_path]: self.output( f"Using path '{matched_source_path}' matched from " f"globbed '{source_path}'." ) # do the copy self.copy( matched_source_path, self.env["destination_path"], ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = Copier() PROCESSOR.execute_shell() VMware Fusion 10/VMwareFusion10URLProvider.py METASEP #!/usr/bin/python # # Copyright 2014 Justin Rummel, # # Updates added 2018 by macmule: # https://github.com/autopkg/justinrummel-recipes/pull/7 # https://github.com/autopkg/justinrummel-recipes/pull/14 # # Thanks fuzzylogiq & Sterling # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib, urllib2, gzip from xml.etree import ElementTree from StringIO import StringIO from autopkglib import Processor, ProcessorError from distutils.version import LooseVersion __all__ = ["VMwareFusion10URLProvider"] # variables VMWARE_BASE_URL = 'https://softwareupdate.vmware.com/cds/vmw-desktop/' FUSION = 'fusion.xml' MAJOR_VERSION = '10' # lock version in class VMwareFusion10URLProvider(Processor): description = "Provides URL to the latest VMware Fusion update release." input_variables = { "product_name": { "required": False, "description": "Default is '%s'." % FUSION, }, "base_url": { "required": False, "description": "Default is '%s." 
% VMWARE_BASE_URL, }, } output_variables = { "url": { "description": "URL to the latest VMware Fusion update release.", }, "version": { "description": "Version to the latest VMware Fusion update release.", }, } __doc__ = description def core_metadata(self, base_url, product_name, major_version): request = urllib2.Request(base_url+product_name) # print base_url try: vsus = urllib2.urlopen(request) except URLError, e: print e.reason data = vsus.read() # print data try: metaList = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" versions = [] for metadata in metaList: version = metadata.find("version") if (major_version == 'latest' or major_version == version.text.split('.')[0]): versions.append(version.text) if len(versions) == 0: raise ProcessorError("Could not find any versions for the \ major_version '%s'." % major_version) versions.sort(key=LooseVersion) self.latest = versions[-1] # print latest urls = [] for metadata in metaList: url = metadata.find("url") urls.append(url.text) matching = [s for s in urls if self.latest in s] core = [s for s in matching if "core" in s] # print core[0] vsus.close() request = urllib2.Request(base_url+core[0]) try: vLatest = urllib2.urlopen(request) except URLError, e: print e.reason buf = StringIO(vLatest.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() # print data try: metadataResponse = ElementTree.fromstring(data) except ExpatData: print "Unable to parse XML data from string" relativePath = metadataResponse.find("bulletin/componentList/component/relativePath") # print core[0].replace("metadata.xml.gz", relativePath.text) return base_url+core[0].replace("metadata.xml.gz", relativePath.text) def main(self): # Determine product_name, and base_url. product_name = self.env.get("product_name", FUSION) base_url = self.env.get("base_url", VMWARE_BASE_URL) major_version = self.env.get("major_version", MAJOR_VERSION) self.env["url"] = self.core_metadata(base_url, product_name, major_version) self.output("Found URL %s" % self.env["url"]) self.env["version"] = self.latest self.output("Found Version %s" % self.env["version"]) if __name__ == "__main__": processor = VMwareFusion10URLProvider() processor.execute_shell() Traffic/TrafficXMLParser.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=import-error, too-few-public-methods """See docstring for TrafficXMLParser class""" from __future__ import absolute_import import os from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["TrafficXMLParser"] __version__ = '1.1' class TrafficXMLParser(Processor): """Parses /META-INF/AIR/application.xml from the copied .air installer""" description = __doc__ input_variables = { "app_xml": { "required": True, "description": "Path to the application.xml." }, } output_variables = { "bundleid": { "description": "Bundled ID.", }, "version": { "description": "The value of CFBundleShortVersionString for the app bundle." 
}, } def main(self): """Parses /META-INF/AIR/application.xml from the copied .air installer""" if not os.path.exists(self.env["app_xml"]): raise ProcessorError("application.xml not found at %s" % self.env["app_xml"]) else: tree = ElementTree.parse(self.env["app_xml"]) for b_id in tree.iterfind('{http://ns.adobe.com/air/application/24.0}id'): self.env["bundleid"] = b_id.text for ver_num in tree.iterfind('{http://ns.adobe.com/air/application/24.0}versionNumber'): self.env["version"] = ver_num.text self.output("bundleid: %s" % self.env["bundleid"]) self.output("version: %s" % self.env["version"]) if __name__ == "__main__": PROCESSOR = TrafficXMLParser() Shared Processors/TempFileFinder.py METASEP #!/usr/local/autopkg/python # # Copyright 2013 Jesse Peterson # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Temp as waiting PR merging and release including PR - https://github.com/autopkg/autopkg/pull/742 # """See docstring for TempFileFinder class""" import os.path from glob import glob from autopkglib import ProcessorError from autopkglib.DmgMounter import DmgMounter __all__ = ["TempFileFinder"] class TempFileFinder(DmgMounter): """Finds a filename for use in other Processors. Currently only supports glob filename patterns. Requires version 0.2.3. """ input_variables = { "pattern": { "description": "Shell glob pattern to match files by", "required": True, }, "find_method": { "description": ( "Type of pattern to match. Currently only " 'supported type is "glob" (also the default)' ), "default": "glob", "required": False, }, } output_variables = { "found_filename": {"description": "Full path of found filename"}, "dmg_found_filename": {"description": "DMG-relative path of found filename"}, "found_basename": {"description": "Basename of found filename"}, } description = __doc__ def globfind(self, pattern): """If multiple files are found the last alphanumerically sorted found file is returned""" glob_matches = glob(pattern, recursive=True) if len(glob_matches) < 1: raise ProcessorError("No matching filename found") glob_matches.sort() return glob_matches[-1] def main(self): pattern = self.env.get("pattern") method = self.env.get("find_method") if method != "glob": raise ProcessorError(f"Unsupported find_method: {method}") source_path = pattern # Check if we're trying to copy something inside a dmg. (dmg_path, dmg, dmg_source_path) = self.parsePathForDMG(source_path) try: if dmg: # Mount dmg and copy path inside. 
mount_point = self.mount(dmg_path) source_path = os.path.join(mount_point, dmg_source_path) # process path with globbing match = self.globfind(source_path) self.env["found_filename"] = match self.output( f"Found file match: '{self.env['found_filename']}' from globbed '{source_path}'" ) if dmg and match.startswith(mount_point): self.env["dmg_found_filename"] = match[len(mount_point) :].lstrip("/") self.output( f"DMG-relative file match: '{self.env['dmg_found_filename']}'" ) if match.endswith('/'): self.env["found_basename"] = os.path.basename(match.rstrip("/")) else: self.env["found_basename"] = os.path.basename(match) self.output( f"Basename match: '{self.env['found_basename']}'" ) finally: if dmg: self.unmount(dmg_path) if __name__ == "__main__": PROCESSOR = TempFileFinder() PROCESSOR.execute_shell() Shared Processors/JSONFileReader.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for JSONFileReader class ''' # Standard Imports from __future__ import absolute_import import json import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['JSONFileReader'] __version__ = '1.0' # pylint: disable = too-few-public-methods class JSONFileReader(Processor): ''' Parses a JSON file, returning the value of the supplied key. Raising if the key is not found. 
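    Illustrative example (an addition, not part of the original recipe): for a file at
    json_path containing {"latest": "1.2.3"}, setting json_key to "latest" results in
    json_value being "1.2.3".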
''' description = __doc__ input_variables = { 'json_key': { 'required': True, 'description': ('Key to look for, and return the value of'), }, 'json_path': { 'required': True, 'description': ('Path to the JSON file'), }, } output_variables = { 'json_value': { 'description': ('Value of the JSON key'), }, } def main(self): ''' See docstring for JSONFileReader class ''' # Progress notification self.output("Looking for: {}".format(self.env['json_path'])) if os.path.isfile(self.env['json_path']): # Read in JSON file with open(self.env['json_path']) as json_file: # Try to parse json_path as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['json_path'], err_msg)) # Look for value of key json_key, raise if an issue try: self.env['json_value'] = load_json[self.env['json_key']] except KeyError: raise ProcessorError("Cannot find key {} within json file: {}" .format(self.env['json_key'], self.env['json_path'])) else: raise ProcessorError("Cannot access JSON file at path: {}" .format(self.env['json_path'])) self.output("json_value: {}".format(self.env['json_value'])) if __name__ == '__main__': PROCESSOR = JSONFileReader() Shared Processors/DistributionPkgInfo.py METASEP #!/usr/bin/python # Copyright 2020 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=import-error, too-few-public-methods """See docstring for DistributionPkgInfo class""" from __future__ import absolute_import from __future__ import print_function import os import subprocess from xml.etree import ElementTree from autopkglib import Processor, ProcessorError __all__ = ["DistributionPkgInfo"] __version__ = '1.1.1' class DistributionPkgInfo(Processor): """Parses a distribution pkg to pull the info, other formats to be added later""" description = __doc__ input_variables = { "pkg_path": { "required": True, "description": ("Path to the Pkg.."), }, } output_variables = { "pkg_id": { "description": ("The package ID.."), }, "version": { "description": ("The version of the pkg from it's info"), }, } # pylint: disable=too-many-branches def main(self): """Cobbled together from various sources, should extract information from a Distribution pkg""" # Build dir as needed,pinched with <3 from: # https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/FlatPkgUnpacker.py#L72 # Extract pkg info, pinched with <3 from: # https://github.com/munki/munki/blob/master/code/client/munkilib/pkgutils.py#L374 self.env["abspkgpath"] = os.path.join(self.env["pkg_path"]) file_path = os.path.join(self.env["RECIPE_CACHE_DIR"], "downloads") cmd_toc = ['/usr/bin/xar', '-tf', self.env["abspkgpath"]] proc = subprocess.Popen(cmd_toc, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (toc, err) = proc.communicate() toc = toc.decode("utf-8") .strip().split('\n') if proc.returncode == 0: # Walk trough the TOC entries if not os.path.exists(file_path): os.mkdir(file_path) for toc_entry in [item for item in toc if item.startswith('Distribution')]: cmd_extract = ['/usr/bin/xar', '-xf', self.env["abspkgpath"], \ toc_entry, '-C', file_path] _ = subprocess.call(cmd_extract) else: raise ProcessorError("pkg not found at pkg_path") dist_path = os.path.join(file_path, "Distribution") version = None pkg_id = None if not os.path.exists(dist_path): raise ProcessorError("Cannot find Distribution") else: tree = ElementTree.parse(dist_path) _ = tree.getroot() try: for elem in tree.iter(tag='product'): version = elem.get("version") for elem in tree.iter(tag='pkg-ref'): pkg_id = elem.get("id") except ElementTree.ParseError as err: print(("Can't parse distribution file %s: %s" % ('dist_path', err.strerror))) if not pkg_id: raise ProcessorError("cannot get pkg_id") else: self.env["pkg_id"] = pkg_id if not version: raise ProcessorError("cannot get version") else: self.env["version"] = version os.remove(dist_path) if __name__ == '__main__': PROCESSOR = DistributionPkgInfo() MacTeX Ghostscript/GhostscriptVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for GhostscriptVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['GhostscriptVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class GhostscriptVersioner(Processor): ''' Returns the version from the Ghostscript binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the Ghostscript binary.'), }, } output_variables = { 'version': { 'description': ('Version of the Ghostscript binary.'), }, } def main(self): ''' See docstring for GhostscriptVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.ghostscript.com/doc/9.54.0/Use.htm#Help_command # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '-v'] ).split()[2].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "Ghostscript binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access Ghostscript binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = GhostscriptVersioner() BrowserStackLocal/BrowserStackLocalBinaryVersioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for BrowserStackLocalBinaryVersioner class ''' # Standard Imports from __future__ import absolute_import import subprocess import os # AutoPkg imports # pylint: disable = import-error from autopkglib import Processor, ProcessorError __all__ = ['BrowserStackLocalBinaryVersioner'] __version__ = '1.0' # pylint: disable = too-few-public-methods class BrowserStackLocalBinaryVersioner(Processor): ''' Returns the version from the BrowserStackLocal binary Raising if the key is not found. ''' description = __doc__ input_variables = { 'binary_path': { 'required': True, 'description': ('Path to the BrowserStackLocal binary.'), }, } output_variables = { 'version': { 'description': ('Version of the BrowserStackLocal binary.'), }, } def main(self): ''' See docstring for BrowserStackLocalBinaryVersioner class ''' # var declaration version = None # Progress notification self.output("Looking for: {}".format(self.env['binary_path'])) # If binary exists if os.path.isfile(self.env['binary_path']): # Get binary version, from: https://www.browserstack.com/local-testing/binary-params # raise if we error try: version = subprocess.check_output([self.env['binary_path'], '--version'] ).split()[3].decode('utf-8') except subprocess.CalledProcessError: raise ProcessorError("Encountered an error when trying to get the " "BrowserStackLocal binary version...") # Raise if binary is missing else: raise ProcessorError("Cannot access BrowserStackLocal binary at path: {}" .format(self.env['binary_path'])) # We should only get here if we have passed the above, but this is belt and braces if version: self.env['version'] = version self.output("version: {}".format( self.env['version'])) else: raise ProcessorError("version is None") if __name__ == '__main__': PROCESSOR = BrowserStackLocalBinaryVersioner() Adobe CC 2019/AdobeCC2019Versioner.py METASEP #!/usr/bin/python # Copyright 2021 dataJAR # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# pylint: disable=import-error """See docstring for AdobeCC2019Versioner class""" from __future__ import absolute_import import glob import json import os import re import zipfile from xml.etree import ElementTree try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError __all__ = ['AdobeCC2019Versioner'] __version__ = ['1.2.1'] class AdobeCC2019Versioner(Processor): """Parses generated Adobe Admin Console CC 2019 pkgs for detailed application path and bundle version info""" description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): """Find the Adobe*_Install.pkg in the Downloads dir based on the name""" download_path = os.path.expanduser('~/Downloads') self.env['PKG'] = os.path.join(download_path, self.env['NAME'], \ 'Build', self.env['NAME'] + '_Install.pkg') self.output('pkg %s' % self.env['PKG']) self.env['uninstaller_pkg_path'] = glob.glob(os.path.join\ (os.path.dirname(self.env['PKG']), '*_Uninstall.pkg'))[0] self.process_installer() def process_installer(self): ''' Determine a pkginfo, version and jss inventory name from the created package. Inputs: PKG: Path to the pkg Outputs: app_json/proxy_xml: The path of the files that within the pkg's ''' install_lang = None option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output('Processing %s' % option_xml_path) option_xml = ElementTree.parse(option_xml_path) for hd_media in option_xml.findall('.//HDMedias/HDMedia'): if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = hd_media.findtext('TargetFolderName') if install_lang is None: for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.output('SAP Code: %s' % self.env['sap_code']) self.env['target_folder'] = ribs_media.findtext('TargetFolderName') self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] is 'APRO': self.output('Installer is HyperDrive') self.output('app_json: %s' % self.env['app_json']) self.process_hd_installer() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = os.path.join(self.env['PKG'], 'Contents/Resources/Setup', \ self.env['target_folder'], 'proxy.xml') if not os.path.exists(self.env['proxy_xml']): raise ProcessorError('APRO selected, proxy.xml not found at: %s' \ % self.env['proxy_xml']) else: self.process_apro_installer() def process_apro_installer(self): ''' Process APRO installer - proxy_xml: Path to proxy_xml if pkg is APRO ''' self.output('Processing Acrobat installer') self.output('proxy_xml: %s' % 
self.env['proxy_xml']) tree = ElementTree.parse(self.env['proxy_xml']) root = tree.getroot() app_bundle_text = root.findtext\ ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']') app_bundle = app_bundle_text.split('/')[1] self.output('app_bundle: %s' % app_bundle) app_path_text = root.findtext('./InstallDir/Platform') self.output('app_path_text: %s' % app_path_text) app_path = app_path_text.split('/')[1] self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) app_version = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output('app_version: %s' % app_version) self.env['display_name'] = app_path + ' CC 2019' self.output('display_name: %s' % self.env['display_name']) self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % self.env['vers_compare_key']) app_bundle_id = 'com.adobe.Acrobat.Pro' self.output('app_bundle_id: %s' % app_bundle_id) self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) # pylint: disable=too-many-branches def process_hd_installer(self): ''' Process HD installer - app_json: Path to the Application JSON from within the PKG ''' #pylint: disable=too-many-locals, too-many-statements self.output('Processing HD installer') with open(self.env['app_json']) as json_file: load_json = json.load(json_file) # AppLaunch is not always in the same format, but is splittable if 'AppLaunch' in load_json: # Bridge CC is HD but does not have AppLaunch app_launch = load_json['AppLaunch'] self.output('app_launch: %s' % app_launch) app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] self.output('app_bundle: %s' % app_bundle) self.output('app_path: %s' % app_path) installed_path = os.path.join('/Applications', app_path, app_bundle) self.output('installed_path: %s' % installed_path) if not app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' CC 2019' elif app_path.endswith('CC') and not app_path.endswith('CC 2019'): self.env['display_name'] = app_path + ' 2019' else: self.env['display_name'] = app_path self.output('display_name: %s' % self.env['display_name']) zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output('zip_file: %s' % zip_file) zip_path = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], zip_file + '.zip') self.output('zip_path: %s' % zip_path) with zipfile.ZipFile(zip_path, mode='r') as myzip: with myzip.open(zip_file + '.pimx') as mytxt: txt = mytxt.read() tree = ElementTree.fromstring(txt) # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. 
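                        # (Each Assets child is expected to carry 'target' and 'source' attributes:
                        # entries whose target begins with [INSTALLDIR] name the payload location via
                        # their source attribute, the '[StagingFolder]' prefix is trimmed from that
                        # path, and the bundle's Contents/Info.plist inside the zip is then read to
                        # obtain the version and CFBundleIdentifier.)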
# Break when found .app/Contents/Info.plist for elem in tree.findall('Assets'): for i in elem.getchildren(): if i.attrib['target'].upper().startswith('[INSTALLDIR]'): bundle_location = i.attrib['source'] self.output('bundle_location: %s' % bundle_location) else: continue if not bundle_location.startswith('[StagingFolder]'): continue elif bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue else: bundle_location = bundle_location[16:] if bundle_location.endswith('.app'): zip_bundle = os.path.join('1', bundle_location, \ 'Contents/Info.plist') else: zip_bundle = os.path.join('1', bundle_location, \ app_bundle, 'Contents/Info.plist') try: with myzip.open(zip_bundle) as myplist: plist = myplist.read() data = load_plist(plist) if self.env['sap_code'] == 'LTRM': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = \ 'CFBundleShortVersionString' self.output('vers_compare_key: %s' % \ self.env['vers_compare_key']) app_version = data[self.env['vers_compare_key']] app_bundle_id = data['CFBundleIdentifier'] self.output('app_bundle_id: %s' % app_bundle_id) self.output('staging_folder: %s' % bundle_location) self.output('staging_folder_path: %s' % zip_bundle) self.output('app_version: %s' % app_version) self.output('app_bundle: %s' % app_bundle) break except zipfile.BadZipfile: continue # Now we have the deets, let's use them self.create_pkginfo(app_bundle, app_bundle_id, app_version, installed_path) def create_pkginfo(self, app_bundle, app_bundle_id, app_version, installed_path): """Create pkginfo with found details Args: app_bundle (str): Bundle name app_version (str): Bundle version installed_path (str): The path where the installed item will be installed. """ self.env['jss_inventory_name'] = app_bundle self.env['pkg_path'] = self.env['PKG'] self.env['version'] = app_version pkginfo = { 'display_name': self.env['display_name'], 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': installed_path, 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': app_bundle_id, }] self.env['additional_pkginfo'] = pkginfo self.output('additional_pkginfo: %s' % self.env['additional_pkginfo']) if __name__ == '__main__': PROCESSOR = AdobeCC2019Versioner() Adobe CC 2019/AdobeCC2019Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed "as is" by DATA JAR LTD. DESCRIPTION Imports Adobe CC 2019 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('CC2019'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*CC2019 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe CC 2019 folder found, creating recipe list...') else: print('%s Adobe CC 2019 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): '''Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' 
+ adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') PARSER_ARGS = PARSER.parse_args() RECIPE_TYPE = PARSER_ARGS.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobecc2019_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobecc2019_report.plist') main() Adobe 2021/Adobe2021Versioner.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2021, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2021Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2021Versioner'] __version__ = ['1.4.10'] # Class def class Adobe2021Versioner(Processor): ''' Parses generated Adobe Admin Console 2021 pkgs for detailed application path and bundle version info. 
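    Version details are read from the pkg's optionXML.xml and then from either the HD
    installer's Application.json / .pimx payload or, for Acrobat (APRO), proxy.xml.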
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, 'architecture_type': { 'description': ('The value of ProcessorArchitecture for the package. ' 'This is either -Intel or -ARM to add with renaming the ' 'package disk image'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # Check for Processor Architecture self.env['architecture_type'] = option_xml.findtext('ProcessorArchitecture') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) self.output("architecture_type: {}".format(self.env['architecture_type'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise 
ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = (root.findtext ('./InstallerProperties/Property[@name=\'ProductVersion\']')) self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file 
pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" .format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2021'): self.env['display_name'] = self.env['app_path'] + ' 2021' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except 
zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) # pylint: disable = too-many-branches, too-many-statements def parse_app_json(self, load_json): ''' Read in values from app_json ''' # We'll override this later if needed self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'AICY': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InCopy' elif self.env['sap_code'] == 'CHAR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Character-Animator.application' elif self.env['sap_code'] == 'DRWV': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.dreamweaver-18.1' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' elif self.env['sap_code'] == 'FLPR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Animate-2021.application' elif self.env['sap_code'] == 'IDSN': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.InDesign' elif self.env['sap_code'] == 'ILST': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.illustrator' elif self.env['sap_code'] == 'KBRG': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge11' elif self.env['sap_code'] == 'LTRM': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.LightroomClassicCC7' self.env['vers_compare_key'] = 'CFBundleVersion' elif self.env['sap_code'] == 'PHSP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Photoshop' elif self.env['sap_code'] == 'SBSTA': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.adobe-substance-3d-sampler' elif self.env['sap_code'] == 'SBSTD': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.substance-3d-designer' elif self.env['sap_code'] == 'SBSTP': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.Adobe-Substance-3D-Painter' elif self.env['sap_code'] == 'SPRK': self.env['app_version'] = load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.xd' elif self.env['sap_code'] == 'STGR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.stager' else: raise ProcessorError("Checking app_json for version details but sap code {}, " "is not within the known list of apps which we know to " "check via their Application.json".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to 
provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Set Processor Architecture info if self.env['architecture_type'] == "x64": pkginfo['supported_architectures'] = [ 'x86_64', ] self.env['architecture_type'] = '-Intel' elif self.env['architecture_type'] == "arm64": pkginfo['supported_architectures'] = [ 'arm64', ] self.env['architecture_type'] = '-ARM' # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2021Versioner() Adobe 2021/Adobe2021Importer.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe 2021 titles found in running users ~/Downloads ''' # Standard Imports from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys # Version __version__ = '1.2' # Functions def main(): ''' Look within DOWNLOADS_PATH for Adobe*2021* items, add to adobe_folders list if found ''' # Progress notification print("Looking for {} folders ...".format(os.path.join(DOWNLOADS_PATH, 'Adobe*2021*'))) # Create empty list adobe_folders = [] # Look within DOWNLOADS_PATH for Adobe*2021 items, add to adobe_folders list if found for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and '2021' in (some_item): adobe_folders.append(some_item) # If no folders are found, exit if not adobe_folders: print("No Adobe*2021 folders found in {}, exiting...".format(DOWNLOADS_PATH)) sys.exit(1) # If 1 or moe folders are found, notify and proceed. if len(adobe_folders) == 1: print("1 Adobe 2021 folder found, creating recipe list...") else: print("{} Adobe 2021 folder found, creating recipe list...".format(len(adobe_folders))) # Check for pkg's pkg_checker(sorted(adobe_folders)) def pkg_checker(adobe_folders): ''' Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do ''' # Progress notification print("Looking for pkgs...") # count var found_pkgs = 0 # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg, for adobe_folder in adobe_folders: # var declaration install_pkg = None uninstall_pkg = None adobe_build_folder_path = os.path.join(DOWNLOADS_PATH, adobe_folder, 'Build') # Look for *_Install.pkg try: install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0] print("Found {}...".format(install_pkg)) except IndexError: print("Cannot find *_Install.pkg within: {}...".format(adobe_build_folder_path)) # Look for *_Uninstall.pkg try: uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0] print("Found {}...".format(uninstall_pkg)) except IndexError: print("Cannot find *_Uninstall.pkg within: {}...".format(adobe_build_folder_path)) # If we can find both *_Install.pkg and *_Uninstall.pkg, add to ADOBE_LIST if install_pkg and uninstall_pkg: # Increment count found_pkgs += 1 # Append to ADOBE_LIST create_list(adobe_folder, found_pkgs) else: print("Cannot find both an *_Install.pkg and *_Uninstall.pkg for {}... " "Skipping...".format(adobe_folder)) # If we did not find any pkg pairs to import if found_pkgs == 0: print("ERROR: No Adobe 2021 pkg pairs found, exiting...") sys.exit(1) # Else, run the recipe list ADOBE_LIST else: run_list() def create_list(adobe_folder, found_pkgs): ''' Create recipe list ''' # Create an empty file at ADOBE_List, if this is the 1st found pkg if found_pkgs == 1: open(ADOBE_LIST, 'w').close() # var declaration library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' 
+ adobe_folder # If we cannot find the override if not os.path.isfile(override_path): print("Skipping {}, as cannot find override...".format(override_path)) return # Append to ADOBE_LIST list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): ''' Run recipe list ''' # Notify we're starting print("Running recipe_list: `{}`".format(ADOBE_LIST)) print() # The subprocess command cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, '--report-plist', REPORT_PATH] # Notify what command we're about to run. print('Running `{}`...'.format(cmd_args)) # Run the command subprocess.call(cmd_args) if __name__ == '__main__': # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2021_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2021_report.plist') # Call main def main() Adobe 2020/Adobe2020Versioner.py METASEP #!/usr/local/autopkg/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION See docstring for AdobeCC2020Versioner class ''' # Standard Imports from __future__ import absolute_import import json import os import re import xml import zipfile from xml.etree import ElementTree # AutoPkg imports # pylint: disable = import-error try: from plistlib import loads as load_plist except ImportError: from FoundationPlist import readPlistFromString as load_plist from autopkglib import Processor, ProcessorError # Define class __all__ = ['Adobe2020Versioner'] __version__ = ['1.4.1'] # Class def class Adobe2020Versioner(Processor): ''' Parses generated Adobe Admin Console 2020 pkgs for detailed application path and bundle version info. 
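    Handles both HD installers (via Application.json and the package .pimx) and the
    RIBS-based Acrobat (APRO) installer (via proxy.xml).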
''' description = __doc__ input_variables = { } output_variables = { 'additional_pkginfo': { 'description': 'Some pkginfo fields extracted from the Adobe metadata.', }, 'jss_inventory_name': { 'description': 'Application title for jamf pro smart group criteria.', }, 'version': { 'description': ('The value of CFBundleShortVersionString for the app bundle. ' 'This may match user_facing_version, but it may also be more ' 'specific and add another version component.'), }, } def main(self): ''' Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise if corresponding *_Uninstall.pkg is missing. Then determine a pkginfo, version and jss inventory name from the Adobe*_Install.pkg ''' # var declaration download_path = os.path.expanduser('~/Downloads') install_lang = None # Path to Adobe*_Install.pkg in the titles Downloads folder self.env['PKG'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Install.pkg')) self.output("install_pkg {}".format(self.env['PKG'])) # Path to Adobe*_Uninstall.pkg n the titles Downloads folder self.env['uninstaller_pkg_path'] = (os.path.join(download_path, self.env['NAME'], 'Build', self.env['NAME'] + '_Uninstall.pkg')) self.output("uninstall_pkg {}".format(self.env['uninstaller_pkg_path'])) # Path to titles optionXML.xml option_xml_path = os.path.join(self.env['PKG'], 'Contents', 'Resources', 'optionXML.xml') self.output("Processing {}".format(option_xml_path)) # Try to parse option_xml, raise if an issue try: option_xml = ElementTree.parse(option_xml_path) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(option_xml_path, err_msg)) # Check to see if HDMedia keys set for hd_media in option_xml.findall('.//HDMedias/HDMedia'): # If we have HDMedia, set vars if hd_media.findtext('MediaType') == 'Product': install_lang = hd_media.findtext('installLang') self.env['sap_code'] = hd_media.findtext('SAPCode') self.env['target_folder'] = hd_media.findtext('TargetFolderName') # If no HDMedia is found, then install_lang will be none if install_lang is None: # Get vars for RIBS media for ribs_media in option_xml.findall('.//Medias/Media'): install_lang = ribs_media.findtext('installLang') self.env['sap_code'] = ribs_media.findtext('SAPCode') self.env['target_folder'] = ribs_media.findtext('TargetFolderName') # Display progress self.output("sap_code: {}".format(self.env['sap_code'])) self.output("target_folder: {}".format(self.env['target_folder'])) # Get app_json var self.env['app_json'] = os.path.join(self.env['PKG'], 'Contents/Resources/HD', \ self.env['target_folder'], 'Application.json') # If Application.json exists, we're looking at a HD installer if os.path.exists(self.env['app_json']): if not self.env['sap_code'] == 'APRO': # Process HD installer self.process_hd_installer_pt1() else: # If not a HD installer Acrobat is a 'current' title with a # RIBS PKG installer we can extract needed metadata from self.env['proxy_xml'] = (os.path.join(self.env['PKG'], 'Contents/Resources/Setup', self.env['target_folder'], 'proxy.xml')) # If proxy_xml does not exist, raise if not os.path.exists(self.env['proxy_xml']): raise ProcessorError("APRO selected, proxy.xml not found at: {}" .format(self.env['proxy_xml'])) # Else, process the APRO (Acrobat) installer self.process_apro_installer() def process_apro_installer(self): ''' Process APRO (Acrobat) installer ''' # Progress notification self.output("Processing Acrobat installer") self.output("proxy_xml: {}".format(self.env['proxy_xml'])) # Try 
to parse proxy_xml, raise if an issue try: parse_xml = ElementTree.parse(self.env['proxy_xml']) except xml.etree.ElementTree.ParseError as err_msg: raise ProcessorError("Failed to read {}: {}".format(self.env['proxy_xml'], err_msg)) # Get root of xml root = parse_xml.getroot() # Get app_bundle app_bundle_text = (root.findtext ('./ThirdPartyComponent/Metadata/Properties/Property[@name=\'path\']')) self.env['app_bundle'] = app_bundle_text.split('/')[1] self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path app_path_text = root.findtext('./InstallDir/Platform') self.env['app_path'] = app_path_text.split('/')[1] self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # Get app_version self.env['app_version'] = root.findtext('./InstallerProperties/Property[@name=\'ProductVersion\']') self.output("app_version: {}".format(self.env['app_version'])) # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' self.output("vers_compare_key: {}".format(self.env['vers_compare_key'])) # Set bundle id self.env['app_bundle_id'] = 'com.adobe.Acrobat.Pro' self.output("app_bundle_id: {}".format(self.env['app_bundle_id'])) # Create pkginfo with found details self.create_pkginfo() def process_hd_installer_pt1(self): ''' Process HD installer - part 1 ''' # Progress notification self.output("Processing HD installer") # Read in app_json file with open(self.env['app_json']) as json_file: # Try to parse app_json as json, raise if an issue try: load_json = json.load(json_file) except json.JSONDecodeError as err_msg: raise ProcessorError("Failed to parse {}: {}".format(self.env['app_json'], err_msg)) # Get app_launch app_launch = load_json['AppLaunch'] self.output("app_launch: {}".format(app_launch)) # Get app_details, app_bundle and app_path app_details = list(re.split('/', app_launch)) if app_details[2].endswith('.app'): app_bundle = app_details[2] app_path = app_details[1] else: app_bundle = app_details[1] app_path = list(re.split('/', (load_json['InstallDir']['value'])))[1] # Get app_bundle self.env['app_bundle'] = app_bundle self.output("app_bundle: {}".format(self.env['app_bundle'])) # Get app_path self.env['app_path'] = app_path self.output("app_path: {}".format(self.env['app_path'])) # Get generic keys self.get_generic_keys() # 2nd part of process self.process_hd_installer_pt2(load_json) def process_hd_installer_pt2(self, load_json): ''' Process HD installer - part 2 ''' # Get name of the zip_file were to open zip_file = load_json['Packages']['Package'][0]['PackageName'] self.output("zip_file: {}".format(zip_file)) # Get pimx_dir if zip_file.endswith('-LearnPanel'): zip_file = load_json['Packages']['Package'][1]['PackageName'] pimx_dir = '2' else: pimx_dir = '1' self.output("pimx_dir: {}".format(pimx_dir)) # Get zip_path zip_path = (os.path.join(self.env['PKG'], 'Contents/Resources/HD', self.env['target_folder'], zip_file + '.zip')) self.output("zip_path: {}".format(zip_path)) # Open zip file, raise if fails try: with zipfile.ZipFile(zip_path, mode='r') as my_zip: # Read in pimx file with my_zip.open(zip_file + '.pimx') as my_txt: # Read in pimx file pimx_txt = my_txt.read() # Try to parse pimx file as XML, raise exception if fails try: xml_tree = ElementTree.fromstring(pimx_txt) # Try to read info.plist from within zip_bundle self.read_info_plist(my_zip, pimx_dir, xml_tree, zip_path) # If we cannot read in the pimx except xml.etree.ElementTree.ParseError as err_msg: self.output("Parsing {} failed with: {}, checking {}" 
.format(zip_file, err_msg, self.env['app_json'])) # Read in values from app_json self.parse_app_json(load_json) except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}".format(zip_path, err_msg)) # Now we have the deets, let's use them self.create_pkginfo() def get_generic_keys(self): ''' Generic keys to get regardless of title ''' # Progress notification self.env['installed_path'] = os.path.join('/Applications', self.env['app_path'], self.env['app_bundle']) self.output("installed_path: {}".format(self.env['installed_path'])) # Get display_name if not self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' elif self.env['app_path'].endswith('CC') and not self.env['app_path'].endswith('2020'): self.env['display_name'] = self.env['app_path'] + ' 2020' else: self.env['display_name'] = self.env['app_path'] # Progress notification self.output("display_name: {}".format(self.env['display_name'])) def read_info_plist(self, my_zip, pimx_dir, xml_tree, zip_path): ''' Try to read info.plist from within zip_bundle ''' # Loop through .pmx's Assets, look for target=[INSTALLDIR], # then grab Assets Source. # Break when found .app/Contents/Info.plist for xml_elem in xml_tree.findall('Assets'): for xml_item in xml_elem.getchildren(): # Below special tweak for the non-Classic Lightroom bundle if (xml_item.attrib['target'].upper().startswith('[INSTALLDIR]') and not xml_item.attrib['target'].endswith('Icons')): # Get bundle_location bundle_location = xml_item.attrib['source'] self.output("bundle_location: {}".format(bundle_location)) else: continue # Amend bundle_location as needed if not bundle_location.startswith('[StagingFolder]'): continue if bundle_location.endswith('Icons') or \ bundle_location.endswith('AMT'): continue bundle_location = bundle_location[16:] # Create zip_bundle if bundle_location.endswith('.app'): zip_bundle = (os.path.join(pimx_dir, bundle_location, 'Contents/Info.plist')) else: zip_bundle = (os.path.join(pimx_dir, bundle_location, self.env['app_bundle'], 'Contents/Info.plist')) # Try to read info.plist from within zip_bundle try: with my_zip.open(zip_bundle) as my_plist: info_plist = my_plist.read() data = load_plist(info_plist) # If the App is Lightroom (Classic or non-Classic) # we need to compare a different value in Info.plist if self.env['sap_code'] == 'LTRM' or \ self.env['sap_code'] == 'LRCC': self.env['vers_compare_key'] = 'CFBundleVersion' else: self.env['vers_compare_key'] = ( 'CFBundleShortVersionString') # Get version from info.plist app_version = data[self.env['vers_compare_key']] # Get bundleid from info.plist self.env['app_bundle_id'] = data['CFBundleIdentifier'] # Progress notifications self.output("vers_compare_key: {}" .format(self.env['vers_compare_key'])) self.output("app_bundle_id: {}" .format(self.env['app_bundle_id'])) self.output("staging_folder: {}" .format(bundle_location)) self.output("staging_folder_path: {}" .format(zip_bundle)) self.env['app_version'] = app_version self.output("app_version: {}".format(self.env['app_version'])) break # If we cannot read the zip file except zipfile.BadZipfile as err_msg: raise ProcessorError("Failed to open {}: {}" .format(zip_path, err_msg)) def parse_app_json(self, load_json): ''' Read in values from app_json ''' # Get vers_compare_key self.env['vers_compare_key'] = 'CFBundleShortVersionString' # Get app_version, cautiously for now for only certain apps if self.env['sap_code'] == 'KBRG': self.env['app_version'] = 
load_json['ProductVersion'] self.env['app_bundle_id'] = 'com.adobe.bridge10' elif self.env['sap_code'] == 'ESHR': self.env['app_version'] = load_json['CodexVersion'] self.env['app_bundle_id'] = 'com.adobe.dimension' else: raise ProcessorError("Checking app_json for version details but sap code {}," "is neither ESHR nor KBRG".format(self.env['sap_code'])) self.output("app_version: {}".format(self.env['app_version'])) # Get app_bundle for app_launch in load_json['AppLaunch'].split('/'): if app_launch.endswith('.app'): app_bundle = ('/Applications/' + app_launch.split('.app')[0] + '/' + app_launch) self.output("app_bundle: {}".format(app_bundle)) def create_pkginfo(self): ''' Create pkginfo with found details ''' # More var declaration self.env['jss_inventory_name'] = self.env['app_bundle'] self.env['pkg_path'] = self.env['PKG'] self.env['version'] = self.env['app_version'] # Get minimum_os_version from override # https://github.com/autopkg/dataJAR-recipes/issues/138 pkginfo = { 'minimum_os_version': self.env['MINIMUM_OS_VERSION'] } # Allow the user to provide a display_name string that prevents CreativeCloudVersioner # from overriding it. if 'pkginfo' not in self.env or 'display_name' not in self.env['pkginfo']: pkginfo['display_name'] = self.env['display_name'] # Create pkginfo is missing from installs array if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']: pkginfo['installs'] = [{ self.env['vers_compare_key']: self.env['version'], 'path': self.env['installed_path'], 'type': 'application', 'version_comparison_key': self.env['vers_compare_key'], 'CFBundleIdentifier': self.env['app_bundle_id'], }] # Notify of additional_pkginfo self.env['additional_pkginfo'] = pkginfo self.output("additional_pkginfo: {}".format(self.env['additional_pkginfo'])) if __name__ == '__main__': PROCESSOR = Adobe2020Versioner() Adobe 2020/Adobe2020Importer.py METASEP #!/usr/bin/python ''' Copyright (c) 2020, dataJAR Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. 
For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe 2020 titles found in running users ~/Downloads ''' from __future__ import absolute_import from __future__ import print_function import argparse import glob import os import subprocess import sys __version__ = '1.1' def main(): '''Gimme some main''' adobe_folders = [] for some_item in os.listdir(DOWNLOADS_PATH): some_path = os.path.join(DOWNLOADS_PATH, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe') and some_item.endswith('2020'): adobe_folders.append(some_item) if not len(adobe_folders): print('No Adobe*2020 folders found in %s, exiting...' % DOWNLOADS_PATH) sys.exit(1) if len(adobe_folders) == 1: print('1 Adobe 2020 folder found, creating recipe list...') else: print('%s Adobe 2020 folder found, creating recipe list...' % len(adobe_folders)) open(ADOBE_LIST, 'w').close() pkg_checker(adobe_folders) def pkg_checker(adobe_folders): ''' Check that we have the Install_pkg's & proceed if we do''' found_pkgs = 0 print('Looking for pkgs...') for adobe_folder in sorted(adobe_folders): try: install_pkg = glob.glob(os.path.join(DOWNLOADS_PATH, adobe_folder, \ 'Build', '*_Install.pkg'))[0] print('Found {0}...'.format(install_pkg)) if os.path.exists(install_pkg): create_list(adobe_folder) found_pkgs += 1 else: print('Cannot find pkg ({0}), for {1}... Skipping...'.format\ (install_pkg, adobe_folder)) except IndexError as err_msg: print('Skipping {0}, as cannot find Install.pkg: {1}...'.format(adobe_folder, err_msg)) if found_pkgs == 0: print('No pkgs found, exiting...') sys.exit(1) else: run_list() def create_list(adobe_folder): ''' Create recipe list ''' library_dir = os.path.expanduser('~/Library/') override_path = os.path.join(library_dir, 'AutoPkg', 'RecipeOverrides', \ adobe_folder + '.' \ + RECIPE_TYPE + '.recipe') override_name = 'local.' + RECIPE_TYPE + '.' + adobe_folder if not os.path.isfile(override_path): print('Skipping {0}, as cannot find override...'.format(override_path)) return list_file = open(ADOBE_LIST, 'a+') list_file.write(override_name + '\n') list_file.close() def run_list(): '''Run recipe list''' if os.path.exists(ADOBE_LIST): print('Running recipe_list: `{0}`'.format(ADOBE_LIST)) print() cmd_args = ['/usr/local/bin/autopkg', 'run', '-v', '--recipe-list', ADOBE_LIST, \ '--report-plist', REPORT_PATH] print('Running `{0}`...'.format(cmd_args)) subprocess.call(cmd_args) else: print('Recipe list not populated, make sure you have the needed overrides in place....') if __name__ == '__main__': # Try to locate autopkg if not os.path.exists('/usr/local/bin/autopkg'): print('Cannot find autopkg') sys.exit(1) # Parse recipe type argument PARSER = argparse.ArgumentParser() PARSER.add_argument('type', type=str, help='Recipe type, either "munki" or "jss"') ARG_PARSER = PARSER.parse_args() RECIPE_TYPE = ARG_PARSER.type.lower() # Constants DOWNLOADS_PATH = os.path.expanduser('~/Downloads/') ADOBE_LIST = os.path.join(DOWNLOADS_PATH + 'adobe2020_list.txt') REPORT_PATH = os.path.join(DOWNLOADS_PATH + 'adobe2020_report.plist') main() Adobe Admin Console Packages/AdobeAdminConsolePackagesImporter.py METASEP #!/usr/local/autopkg/python # pylint: disable = invalid-name ''' Copyright (c) 2022, dataJAR Ltd. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither data JAR Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SUPPORT FOR THIS PROGRAM This program is distributed 'as is' by DATA JAR LTD. For more information or support, please utilise the following resources: http://www.datajar.co.uk DESCRIPTION Imports Adobe Admin Console Packages ''' # Standard Imports import argparse import glob import os import plistlib import subprocess import sys import yaml # pylint: disable = import-error from CoreFoundation import CFPreferencesCopyAppValue # Version __version__ = '1.0' # Functions def main(): ''' Check passed arguments before proceeding ''' # Setup arparse parser = argparse.ArgumentParser() parser.add_argument('type', type=str, help="Recipe type, for example: \"munki\" or \"jss\"") arg_parser = parser.parse_args() # Retrieve passed arguments, and assign to variables recipe_type = arg_parser.type.lower() packages_path = os.path.expanduser('~/Downloads/') # Check that packages_path exists if not os.path.exists(packages_path): print(f"ERROR: Cannot locate directory, {packages_path}... exiting...") sys.exit(1) # Check that packages_path is a directory if not os.path.isdir(packages_path): print(f"ERROR: {packages_path} is a not a directory... exiting...") sys.exit(1) # Check for Adobe* dirs look_for_dirs(packages_path, recipe_type) def look_for_dirs(packages_path, recipe_type): ''' Look for dirs starting with Adobe*, in packages_path ''' # Progress notification print(f"Looking in {packages_path} for Adobe* folders ...") # Create empty list adobe_folders = [] # Look within packages_path for Adobe* items, add to adobe_folders list if found for some_item in os.listdir(packages_path): some_path = os.path.join(packages_path, some_item) if os.path.isdir(some_path): if some_item.startswith('Adobe'): adobe_folders.append(some_item) # If no folders are found, exit if not adobe_folders: print(f"No Adobe* folders found in {packages_path}, exiting...") sys.exit(1) # If 1 or more folders are found, notify and proceed. 
if len(adobe_folders) == 1: print("1 Adobe folder found...") else: print(f"{len(adobe_folders)} Adobe folders found...") # Get the override_dirs try: override_dirs = CFPreferencesCopyAppValue('RECIPE_OVERRIDE_DIRS', 'com.github.autopkg').split() except AttributeError: override_dirs = os.path.join(os.path.expanduser('~/Library/'), 'AutoPkg', 'RecipeOverrides').split() print(f"Override dirs: {override_dirs}") # Check for pkg's pkg_checker(sorted(adobe_folders), override_dirs, packages_path, recipe_type) def pkg_checker(adobe_folders, override_dirs, packages_path, recipe_type): ''' Check that we have the Install_pkg's & Uninstall_pkg's needed, proceed if we do ''' # Progress notification print("Looking for pkgs...") # Count var found_pkgs = 0 # For each folder within adobe_folders, look for *_Install.pkg and *_Uninstall.pkg, for adobe_folder in adobe_folders: # Var declaration install_pkg = None uninstall_pkg = None adobe_build_folder_path = os.path.join(packages_path, adobe_folder, 'Build') recipe_list_path = os.path.join(packages_path + 'adobe_admin_console_recipes_list.txt') report_path = os.path.join(packages_path + 'adobe_admin_console_recipes_report.plist') # Progress notification print(f"Checking {adobe_build_folder_path}...") if not os.path.isdir(adobe_build_folder_path): print(f"No Build dir at {adobe_build_folder_path}... skipping...") else: print(f"Found Build dir at {adobe_build_folder_path}...") # Look for *_Install.pkg try: install_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Install.pkg'))[0] print(f"Found {install_pkg}...") except IndexError: print(f"Cannot find *_Install.pkg within: {adobe_build_folder_path}...") # Look for *_Uninstall.pkg try: uninstall_pkg = glob.glob(os.path.join(adobe_build_folder_path, '*_Uninstall.pkg'))[0] print(f"Found {uninstall_pkg}...") except IndexError: print("Cannot find *_Uninstall.pkg within: {adobe_build_folder_path}...") # If we can find both *_Install.pkg and *_Uninstall.pkg, add to recipe_list_path if install_pkg and uninstall_pkg: # Increment count found_pkgs += 1 # Append to recipe_list_path create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type) else: print(f"ERROR: Cannot find {adobe_folder}, these recipes need packages of the " "Managed Package variety, which include _Install and _Uninstall packages" ".... 
skipping...") # If we did not find any pkg pairs to import, exit if found_pkgs == 0: print("ERROR: No Adobe pkg pairs found, exiting...") sys.exit(1) # Run recipe list run_list(recipe_list_path, report_path) def create_list(adobe_folder, found_pkgs, override_dirs, recipe_list_path, recipe_type): ''' Create recipe list ''' # Var declaration override_path = None # Look for recipes in override_dirs for override_dir in override_dirs: recipe_files = os.listdir(override_dir) for recipe_file in recipe_files: if recipe_file.startswith(adobe_folder) and recipe_type in recipe_file: override_path = os.path.join(override_dir, recipe_file) if not override_path: # Return when we cannot find a matching override print(f"Cannot find override starting with: {adobe_folder}, skipping...") return print(f"Found override at: {override_path}, proceeding...") # Create an empty file at recipe_list_path, if this is the 1st found pkg if found_pkgs == 1: with open(recipe_list_path, 'w', encoding='utf-8') as new_file: new_file.write('') # Retrieve override name from file # Borrowed with <3 from: # https://github.com/autopkg/autopkg/blob/405c913deab15042819e2f77f1587a805b7c1ada/Code/autopkglib/__init__.py#L341-L359 if override_path.endswith(".yaml"): try: # try to read it as yaml with open (override_path, 'rb') as read_file: recipe_dict = yaml.load(read_file, Loader=yaml.FullLoader) override_name = recipe_dict["Identifier"] # pylint: disable = broad-except except Exception as err_msg: print(f"ERROR: yaml error for {override_path}: {err_msg}") return try: # try to read it as a plist with open (override_path, 'rb') as read_file: recipe_dict = plistlib.load(read_file) override_name = recipe_dict["Identifier"] # pylint: disable = broad-except except Exception as err_msg: print(f"ERROR: plist error for {override_path}: {err_msg}") return print(f"Adding {override_path}, to {recipe_list_path} with identifier: {override_name}...") # Append to recipe_list_path with open(recipe_list_path, 'a+', encoding='utf-8') as list_file: list_file.write(override_name + '\n') def run_list(recipe_list_path, report_path): ''' Run recipe list ''' # Check that the recipe_list file has content before proceeding with open (recipe_list_path, encoding='utf-8') as recipe_list_file: content_test = recipe_list_file.readlines() if not content_test: print(f"{recipe_list_path} is empty, no overrides found... exiting ...") sys.exit(1) # Notify we're starting print(f"Running recipe_list: `{recipe_list_path}`") # The subprocess command cmd_args = ['/usr/local/bin/autopkg', 'run', '-vv', '--recipe-list', recipe_list_path, '--report-plist', report_path] # Notify what command we're about to run. print(f"Running: `{cmd_args}`...") # Run the command subprocess.call(cmd_args) if __name__ == '__main__': # Gimme some main main() Adobe Admin Console Packages/AdobeAdminConsolePackagesPkgInfoCreator.py METASEP
#!/usr/local/autopkg/python
# pylint: disable = invalid-name
'''
Copyright (c) 2022, dataJAR Ltd.  All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
        * Redistributions of source code must retain the above copyright
          notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above copyright
          notice, this list of conditions and the following disclaimer in the
          documentation and/or other materials provided with the distribution.
        * Neither data JAR Ltd nor the names of its contributors may be used to
          endorse or promote products derived from this software without specific
          prior written permission.

    THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY
    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

SUPPORT FOR THIS PROGRAM
    This program is distributed 'as is' by DATA JAR LTD.
    For more information or support, please utilise the following resources:
    http://www.datajar.co.uk

DESCRIPTION

Generates installation information for Adobe Admin Console Packages
'''

# Standard Imports
import json
import os
import re
import xml
from xml.etree import ElementTree


# AutoPkg imports
# pylint: disable = import-error
from autopkglib import (Processor,
                        ProcessorError)


# Define class
__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']
__version__ = ['1.0']


# Class def
class AdobeAdminConsolePackagesPkgInfoCreator(Processor):
    '''
        Parses generated Adobe Admin Console Packages to generate installation information.
    '''

    description = __doc__

    input_variables = {
    }

    output_variables = {
        'aacp_application_path': {
            'description': 'aacp_installdir_value after regex applied to get the path alone.',
        },
        'aacp_application_architecture_type': {
            'description': 'The architecture type for the title, either arm64 or x86_64.',
        },
        'aacp_application_install_lang': {
            'description': "The title's installation language.",
        },
        'aacp_application_json_path': {
            'description': "Path to the title's Application.json file.",
        },
        'aacp_application_major_version': {
            'description': 'The major version of the title.',
        },
        'aacp_blocking_applications': {
            'description': 'Sorted set of the conflicting processes.',
        },
        'aacp_application_description': {
            'description': 'Short description of the title.',
        },
        'aacp_application_sap_code': {
            'description': "The title's SAP code.",
        },
        'aacp_install_pkg_path': {
            'description': 'Path to the Adobe*_Install.pkg.',
        },
        'aacp_json_path': {
            'description': 'Path to AdobeAutoPkgApplicationData.json.',
        },
        'aacp_matched_json': {
            'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '
                            '"aacp_application_sap_code" and "aacp_application_major_version".'),
        },
        'aacp_option_xml_path': {
            'description': "Path to the title's optionXML.xml file.",
        },
        'aacp_parent_dir': {
            'description': 'Path to parent directory of this processor.',
        },
        'aacp_proxy_xml_path': {
            'description': 'Acrobat only, path to proxy.xml.',
        },
        'aacp_target_folder': {
            'description': 'The name of the folder within the pkg to check files for metadata.',
        },
        'aacp_uninstall_pkg_path': {
            'description': 'Path to the Adobe*_Uninstall.pkg.',
        },
        'additional_pkginfo': {
            'description':
                'Additional pkginfo fields extracted from the Adobe metadata.',
        },
        'version': {
            'description': "The title's version.",
        }
    }


    def main(self):
        '''
            Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise
            if corresponding *_Uninstall.pkg is missing.
        '''

        # Progress notification
        self.output("Starting versioner process...")

        # Get set packages_path
        self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')
        self.output(f"aacp_packages_path: {self.env['aacp_packages_path']}")

        # Check that packages_path exists
        if not os.path.exists(self.env['aacp_packages_path']):
            raise ProcessorError(f"ERROR: Cannot locate directory, "
                                 f"{self.env['aacp_packages_path']}... exiting...")

        # Check that packages_path is a directory
        if not os.path.isdir(self.env['aacp_packages_path']):
            raise ProcessorError(f"ERROR: {self.env['aacp_packages_path']} is not a "
                                 "directory... exiting...")

        # Path to Adobe*_Install.pkg
        self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],
                                                          self.env['NAME'], 'Build',
                                                          self.env['NAME'] + '_Install.pkg'))

        # Check that the path exists, raise if not
        if not os.path.exists(self.env['aacp_install_pkg_path']):
            raise ProcessorError(f"ERROR: Cannot find "
                                 f"{self.env['aacp_install_pkg_path']}... exiting...")
        self.output(f"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}")

        # Path to Adobe*_Uninstall.pkg
        self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],
                                                            self.env['NAME'], 'Build',
                                                            self.env['NAME'] + '_Uninstall.pkg'))

        # Check that the path exists, raise if not
        if not os.path.exists(self.env['aacp_uninstall_pkg_path']):
            raise ProcessorError(f"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these "
                                 f"recipes need packages of the Managed Package variety, which "
                                 f"include _Install and _Uninstall packages.... exiting...")
        self.output(f"aacp_uninstall_pkg_path: {self.env['aacp_uninstall_pkg_path']}")

        # Process the titles optionXML.xml
        self.process_optionxml_xml()


    def process_optionxml_xml(self):
        '''
            Process the titles optionXML.xml
        '''

        # Var declaration
        self.env['aacp_application_install_lang'] = None

        # Path to titles optionXML.xml
        self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],
                                                        'Contents', 'Resources', 'optionXML.xml')
        if not os.path.exists(self.env['aacp_option_xml_path']):
            raise ProcessorError(f"ERROR: Cannot find {self.env['aacp_option_xml_path']}... "
                                 "exiting...")
        self.output(f"aacp_option_xml_path: {self.env['aacp_option_xml_path']}")

        # Progress notification
        self.output(f"Processing: {self.env['aacp_option_xml_path']}...")

        # Try to parse option_xml, raise if an issue
        try:
            option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])
        except xml.etree.ElementTree.ParseError as err_msg:
            raise ProcessorError from err_msg

        # Check to see if HDMedia keys set
        for hd_media in option_xml.findall('.//HDMedias/HDMedia'):
            # If we have HDMedia, set vars
            if hd_media.findtext('MediaType') == 'Product':
                self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')
                self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')
                self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')
                self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')

        # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none
        if not self.env['aacp_application_install_lang']:
            # Get vars for RIBS media
            for ribs_media in option_xml.findall('.//Medias/Media'):
                self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')
                self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')
                self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')
                self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')

        # Check for Processor Architecture
        self.env['aacp_application_architecture_type'] = (
            option_xml.findtext('ProcessorArchitecture').lower())
        if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:
            raise ProcessorError(f"architecture_type: "
                                 f"{self.env['aacp_application_architecture_type']},"
                                 f" is neither arm64, macuniversal nor x64... exiting...")
        if self.env['aacp_application_architecture_type'] == 'x64':
            self.env['aacp_application_architecture_type'] = 'x86_64'

        # Display progress
        self.output(f"aacp_application_sap_code: {self.env['aacp_application_sap_code']}")
        self.output(f"aacp_target_folder: {self.env['aacp_target_folder']}")
        self.output(f"aacp_application_architecture_type: "
                    f"{self.env['aacp_application_architecture_type']}")
        self.output(f"aacp_application_install_lang: {self.env['aacp_application_install_lang']}")
        self.output(f"aacp_application_major_version: {self.env['aacp_application_major_version']}")

        # If we're looking at Acrobat, then we need to process things differently
        if self.env['aacp_application_sap_code'] == 'APRO':
            self.process_apro_installer()
        else:
            # Set application_json_path
            self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],
                                                                  'Contents/Resources/HD',
                                                                  self.env['aacp_target_folder'],
                                                                  'Application.json')
            # Process HD installer
            self.process_hd_installer()


    def process_apro_installer(self):
        '''
            Process APRO (Acrobat) installer
        '''

        # Progress notification
        self.output("Processing Acrobat installer")
        self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],
                                                        'Contents/Resources/Setup',
                                                        self.env['aacp_target_folder'],
                                                        'proxy.xml'))
        self.output(f"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}")

        # Try to parse proxy_xml, raise if an issue
        try:
            parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])
        except xml.etree.ElementTree.ParseError as err_msg:
            raise ProcessorError from err_msg

        # Get root of xml
        root = parse_xml.getroot()

        # Get app_version
        self.env['version'] = (root.findtext
                               ('./InstallerProperties/Property[@name=\'ProductVersion\']'))
        self.output(f"version: {self.env['version']}")

        # Set to []
        self.env['aacp_blocking_applications'] = []

        # 2nd part of process
        self.process_adobe_autopkg_application_data(None)


    def process_hd_installer(self):
        '''
            Process HD installer
        '''

        # Var declaration
        blocking_applications = []

        # Progress notification
        self.output(f"Processing {self.env['aacp_application_json_path']}...")

        # Read in app_json file
        with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:

            # Try to parse app_json as json, raise if an issue
            try:
                load_json = json.load(json_file)
            except json.JSONDecodeError as err_msg:
                raise ProcessorError from err_msg

            # Get description
            tag_lines = load_json["ProductDescription"]["Tagline"]["Language"]
            for tag_line in tag_lines:
                if tag_line['locale'] == self.env['aacp_application_install_lang']:
                    self.env['aacp_application_description'] = tag_line['value']
                    # Add a . if missing from the end
                    if not self.env['aacp_application_description'].endswith('.'):
                        self.env['aacp_application_description'] = (
                            self.env['aacp_application_description'] + '.')
                    self.output(f"aacp_application_description: "
                                f"{self.env['aacp_application_description']}")

            # Get conflicting processes
            conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']
            for conflicting_process in conflicting_processes:
                # Only add if forceKillAllowed is False
                if not conflicting_process['forceKillAllowed']:
                    blocking_applications.append(conflicting_process['ProcessDisplayName'])
            if blocking_applications:
                self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))
                self.output(f"aacp_blocking_applications: "
                            f"{self.env['aacp_blocking_applications']}")

            # 2nd part of process
            self.process_adobe_autopkg_application_data(load_json)


    def process_adobe_autopkg_application_data(self, load_json):
        '''
            Get more details from AdobeAutoPkgApplicationData.json
        '''

        # var declaration
        self.env['aacp_matched_json'] = None

        # Get this scripts parent directory
        self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))
        self.output(f"aacp_parent_dir: {self.env['aacp_parent_dir']}")

        # Get the path to AdobeAutoPkgApplicationData.json
        self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],
                                                  'AdobeAutoPkgApplicationData.json')
        self.output(f"aacp_json_path: {self.env['aacp_json_path']}")

        # Progress notification
        self.output(f"Processing {self.env['aacp_json_path']}...")

        # Read in AdobeAutoPkgApplicationData.json file
        with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:

            # Try to parse app_json as json, raise if an issue
            try:
                self.env['aacp_autopkg_json'] = json.load(json_file)
            except json.JSONDecodeError as err_msg:
                raise ProcessorError from err_msg

        # Get applications dict from the json
        for application_data in self.env['aacp_autopkg_json']:
            if application_data['sap_code'] == self.env['aacp_application_sap_code']:
                for aacp_version_json in application_data['versions'].keys():
                    try:
                        if aacp_version_json == self.env['aacp_application_major_version']:
                            self.env['aacp_matched_json'] = (application_data['versions']
                                                             [self.env['aacp_application_major_version']])
                            self.output(f"aacp_matched_json: {self.env['aacp_matched_json']}")
                    except KeyError as err_msg:
                        raise ProcessorError(f"Cannot find details for "
                                             f"{self.env['aacp_application_sap_code']} "
                                             f"with version: "
                                             f"{self.env['aacp_application_major_version']},"
                                             f" in {self.env['aacp_json_path']}... "
                                             f"exiting...") from err_msg

        # If we found a match
        if self.env['aacp_matched_json']:
            self.process_matched_json(load_json)
        else:
            raise ProcessorError("Cannot find details for "
                                 f"{self.env['aacp_application_sap_code']} "
                                 f"with version: {self.env['aacp_application_major_version']},"
                                 f" in {self.env['aacp_json_path']}...")


    def process_matched_json(self, load_json):
        '''
            Get metadata with the aid of self.env['aacp_matched_json']
        '''

        # Applications version, if not APRO
        if not self.env['aacp_application_sap_code'] == 'APRO':
            self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]
            self.output(f"version: {self.env['version']}")

        # If the version is unsupported
        if 'unsupported_versions_dict' in self.env['aacp_matched_json']:
            self.output(f"unsupported_versions_dict: "
                        f"{self.env['aacp_matched_json']['unsupported_versions_dict']}")
            for unsupported_version in self.env['aacp_matched_json']['unsupported_versions_dict']:
                if unsupported_version == self.env['version']:
                    raise ProcessorError(
                        f"{self.env['aacp_matched_json']['unsupported_versions_dict'][unsupported_version]}")

        # Applications bundle id
        self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']
        self.output(f"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}")

        # Applications minimum os, if APRO get from self.env['aacp_matched_json']
        if not self.env['aacp_application_sap_code'] == 'APRO':
            self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']
                                                                 ['minos_regex'],
                                                                 load_json['SystemRequirement']
                                                                 ['CheckCompatibility']
                                                                 ['Content'])[1])
            self.output(f"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}")
        else:
            self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']

        # Applications version comparison key
        self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']
                                                ['version_comparison_key'])
        self.output(f"aacp_version_compare_key: {self.env['aacp_version_compare_key']}")

        # Applications display name
        self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']
        self.output(f"aacp_application_display_name: {self.env['aacp_application_display_name']}")

        # Full path to the application bundle on disk, as per Terminal etc, not Finder
        self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']

        # Get description if missing:
        if not 'aacp_application_description' in self.env:
            self.env['aacp_application_description'] = (self.env['aacp_matched_json']
                                                        ['app_description'])
            self.output(f"aacp_application_description: description missing, set from "
                        f"aacp_matched_json: "
                        f"{self.env['aacp_application_description']}")
            # Add a . if missing from the end
            if not self.env['aacp_application_description'].endswith('.'):
                self.env['aacp_application_description'] = (self.env['aacp_application_description']
                                                            + '.')

        # Get additional_blocking_applications
        if 'additional_blocking_applications' in self.env['aacp_matched_json']:
            for additional_blocking_application in (self.env['aacp_matched_json']
                                                    ['additional_blocking_applications']):
                self.env['aacp_blocking_applications'].append(additional_blocking_application)
            self.env['aacp_blocking_applications'] = (
                sorted(set(self.env['aacp_blocking_applications'])))
            self.output(f"aacp_blocking_applications updated: "
                        f"{self.env['aacp_blocking_applications']}")

        # Now we have the deets, let's use them
        self.create_pkginfo()


    def create_pkginfo(self):
        '''
            Create pkginfo with found details
        '''

        # var declaration
        pkginfo = {}

        # Set pkginfo variables
        if (self.env['aacp_application_architecture_type'] and
                not self.env['aacp_application_architecture_type'] == 'macuniversal'):
            pkginfo['supported_architectures'] = [self.env['aacp_application_architecture_type']]

        if self.env['aacp_application_description']:
            pkginfo['description'] = self.env['aacp_application_description']

        if self.env['aacp_application_display_name']:
            pkginfo['display_name'] = self.env['aacp_application_display_name']

        if self.env['aacp_blocking_applications']:
            pkginfo['blocking_applications'] = self.env['aacp_blocking_applications']

        if self.env['aacp_application_minimum_os']:
            pkginfo['minimum_os_version'] = self.env['aacp_application_minimum_os']

        # Check for any var replacements
        for some_key in self.env:
            # only process the keys beginning with aacp_ and those not ending _json
            if some_key.startswith('aacp_') and not some_key.endswith('_json'):
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 
'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 
'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (\n self.env['aacp_application_description'] + '.')\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n # Only add if forceKillAllowed is False\n if not conflicting_process['forceKillAllowed']:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))\n self.output(f\"aacp_parent_dir: {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... 
\"\n f\"exiting...\") from err_msg\n\n # If we found a match\n if self.env['aacp_matched_json']:\n self.process_matched_json(load_json)\n else:\n raise ProcessorError(\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: {self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}...\")\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # If the version is unsupported\n if 'unsupported_versions_dict' in self.env['aacp_matched_json']:\n self.output(f\"unsupported_versions_dict: {self.env['aacp_matched_json']\"\n f\"['unsupported_versions_dict']}\")\n for unsupported_version in self.env['aacp_matched_json']['unsupported_versions_dict']:\n if unsupported_version == self.env['version']:\n raise ProcessorError(f\"{self.env['aacp_matched_json']['unsupported_versions_dict']\"\n f\"[unsupported_version]}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:\n self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']\n\n # Applications version comparison key\n self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']\n ['version_comparison_key'])\n self.output(f\"aacp_version_compare_key: {self.env['aacp_version_compare_key']}\")\n\n # Applications display name\n self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']\n self.output(f\"aacp_application_display_name: {self.env['aacp_application_display_name']}\")\n\n # Full path to the application bundle on disk, as per Terminal etc, not Finder\n self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']\n\n # Get description if missing:\n if not 'aacp_application_description' in self.env:\n self.env['aacp_application_description'] = (self.env['aacp_matched_json']\n ['app_description'])\n self.output(f\"aacp_application_description: description missing, set from \"\n f\"aacp_matched_json: \"\n f\"{self.env['aacp_application_description']}\")\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (self.env['aacp_application_description']\n + '.')\n\n # Get additional_blocking_applications\n if 'additional_blocking_applications' in self.env['aacp_matched_json']:\n for additional_blocking_application in (self.env['aacp_matched_json']\n ['additional_blocking_applications']):\n self.env['aacp_blocking_applications'].append(additional_blocking_application)\n self.env['aacp_blocking_applications'] = (\n sorted(set(self.env['aacp_blocking_applications'])))\n self.output(f\"aacp_blocking_applications updated: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # Now we have the deets, let's use them\n self.create_pkginfo()\n\n\n def create_pkginfo(self):\n '''\n Create pkginfo with found details\n '''\n\n # var declaration\n pkginfo = {}\n\n # Set pkginfo variables\n if (self.env['aacp_application_architecture_type'] and\n not self.env['aacp_application_architecture_type'] == 'macuniversal'):\n pkginfo['supported_architectures'] = [self.env['aacp_application_architecture_type']]\n\n if self.env['aacp_application_description']:\n pkginfo['description'] = self.env['aacp_application_description']\n\n if self.env['aacp_application_display_name']:\n pkginfo['display_name'] = self.env['aacp_application_display_name']\n\n if self.env['aacp_blocking_applications']:\n pkginfo['blocking_applications'] = self.env['aacp_blocking_applications']\n\n if self.env['aacp_application_minimum_os']:\n pkginfo['minimum_os_version'] = self.env['aacp_application_minimum_os']\n\n # Check for any var replacements\n for some_key in self.env:\n # only process the keys beginning with aacp_ and those not ending _json\n if some_key.startswith('aacp_') and not some_key.endswith('_json'):\n self.replace_element(some_key)\n\n # Create pkginfo is missing from installs array\n #if 'pkginfo' not in self.env or 'installs' not in self.env['pkginfo']:\n pkginfo['installs'] = [{\n 'CFBundleIdentifier': self.env['aacp_application_bundle_id'],\n self.env['aacp_version_compare_key']: self.env['version'],\n 'path': self.env['aacp_application_full_path'],\n 'type': 'application',\n 'version_comparison_key': self.env['aacp_version_compare_key']\n }]\n\n # Notify of additional_pkginfo\n self.env['additional_pkginfo'] = pkginfo\n self.output(f\"additional_pkginfo: {self.env['additional_pkginfo']}\")\n\n\n # pylint: disable = too-many-branches\n def replace_element(self, some_key):\n '''\n Checks for instances of %var_name% and replaces with the value for the matching\n %var_name%\n\n '''\n\n # regex pattern\n re_pattern = '%(.*?)%'\n\n # If it's a string\n if isinstance(self.env[some_key], str):\n # check for a match\n re_match = re.search(re_pattern, self.env[some_key])\n # if we have a match\n if re_match:\n self.output(f\"found: %{re_match[1]}% in {some_key}, looking to replace...\")\n self.env[some_key] = (self.env[some_key].replace('%' + re_match[1] + '%',\n self.env[re_match[1]]))\n self.output(f\"{some_key} is now {self.env[some_key]}...\")\n # If a dict\n elif isinstance(self.env[some_key], dict):\n for sub_key in self.env[some_key]:\n # check for a match\n re_match = re.search('%(.*?)%', self.env[some_key][sub_key])\n # if we have a match\n if re_match:\n self.output(f\"found: %{re_match[1]}% in {sub_key} from {some_key}, \"\n \"looking to replace...\")\n self.env[some_key][sub_key] = (self.env[some_key][sub_key].replace('%' +\n re_match[1] + '%', self.env[re_match[1]]))\n 
self.output(f\"{sub_key} in {some_key} is now {self.env[some_key][sub_key]}...\")\n elif isinstance(self.env[some_key], list):\n for list_item in self.env[some_key]:\n # If it's a string\n if isinstance(list_item, str):\n # check for a match\n re_match = re.search(re_pattern, list_item)\n # if we have a match\n if re_match:\n self.output(f\"found: %{re_match[1]}% in {list_item}, looking to replace...\")\n self.env[some_key][list_item] = self.env[some_key][list_item].replace('%' +\n re_match[1] + '%', self.env[re_match[1]])\n self.output(f\"{list_item} is now {self.env[some_key][list_item]}...\")\n elif isinstance(list_item, dict):\n for sub_item in self.env[some_key][list_item]:\n # check for a match\n re_match = re.search('%(.*?)%', self.env[some_key][list_item])\n # if we have a match\n if re_match:\n self.output(f\"found: %{re_match[1]}% in {sub_item} from {list_item}, \"\n \"looking to replace...\")\n self.env[some_key][list_item] = (\n self.env[some_key][list_item].replace('%' + re_match[1] + '%',\n self.env[re_match[1]]))\n self.output(f\"{sub_item} in {list_item} is now \"\n f\"{self.env[some_key][list_item]}...\")\n else:\n self.output(f\"{some_key} is {type(self.env[some_key])}, processing skipped..\")\n else:\n self.output(f\"{some_key} is {type(self.env[some_key])}, processing skipped..\")\n\n\n\nif __name__ == '__main__':", "type": "infile" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (\n self.env['aacp_application_description'] + '.')\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n # Only add if forceKillAllowed is False\n if not conflicting_process['forceKillAllowed']:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))\n self.output(f\"aacp_parent_dir: {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:", "type": "common" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:", "type": "common" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):", "type": "non_informative" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 
'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (\n self.env['aacp_application_description'] + '.')\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n # Only add if forceKillAllowed is False\n if not conflicting_process['forceKillAllowed']:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))\n self.output(f\"aacp_parent_dir: {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... 
\"\n f\"exiting...\") from err_msg\n\n # If we found a match\n if self.env['aacp_matched_json']:\n self.process_matched_json(load_json)\n else:\n raise ProcessorError(\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: {self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}...\")\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # If the version is unsupported\n if 'unsupported_versions_dict' in self.env['aacp_matched_json']:\n self.output(f\"unsupported_versions_dict: {self.env['aacp_matched_json']\"\n f\"['unsupported_versions_dict']}\")\n for unsupported_version in self.env['aacp_matched_json']['unsupported_versions_dict']:\n if unsupported_version == self.env['version']:\n raise ProcessorError(f\"{self.env['aacp_matched_json']['unsupported_versions_dict']\"\n f\"[unsupported_version]}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:\n self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']\n\n # Applications version comparison key\n self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']\n ['version_comparison_key'])\n self.output(f\"aacp_version_compare_key: {self.env['aacp_version_compare_key']}\")\n\n # Applications display name\n self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']\n self.output(f\"aacp_application_display_name: {self.env['aacp_application_display_name']}\")\n\n # Full path to the application bundle on disk, as per Terminal etc, not Finder\n self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']\n\n # Get description if missing:", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (\n self.env['aacp_application_description'] + '.')\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n # Only add if forceKillAllowed is False\n if not conflicting_process['forceKillAllowed']:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))\n self.output(f\"aacp_parent_dir: {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... 
\"\n f\"exiting...\") from err_msg\n\n # If we found a match\n if self.env['aacp_matched_json']:\n self.process_matched_json(load_json)\n else:\n raise ProcessorError(\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: {self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}...\")\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # If the version is unsupported\n if 'unsupported_versions_dict' in self.env['aacp_matched_json']:\n self.output(f\"unsupported_versions_dict: {self.env['aacp_matched_json']\"\n f\"['unsupported_versions_dict']}\")\n for unsupported_version in self.env['aacp_matched_json']['unsupported_versions_dict']:\n if unsupported_version == self.env['version']:\n raise ProcessorError(f\"{self.env['aacp_matched_json']['unsupported_versions_dict']\"\n f\"[unsupported_version]}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:\n self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']\n\n # Applications version comparison key\n self.env['aacp_version_compare_key'] = (self.env['aacp_matched_json']\n ['version_comparison_key'])\n self.output(f\"aacp_version_compare_key: {self.env['aacp_version_compare_key']}\")\n\n # Applications display name\n self.env['aacp_application_display_name'] = self.env['aacp_matched_json']['display_name']\n self.output(f\"aacp_application_display_name: {self.env['aacp_application_display_name']}\")\n\n # Full path to the application bundle on disk, as per Terminal etc, not Finder\n self.env['aacp_application_full_path'] = self.env['aacp_matched_json']['app_path']\n\n # Get description if missing:\n if not 'aacp_application_description' in self.env:\n self.env['aacp_application_description'] = (self.env['aacp_matched_json']\n ['app_description'])\n self.output(f\"aacp_application_description: description missing, set from \"\n f\"aacp_matched_json: \"\n f\"{self.env['aacp_application_description']}\")\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (self.env['aacp_application_description']\n + '.')\n\n # Get additional_blocking_applications\n if 'additional_blocking_applications' in self.env['aacp_matched_json']:\n for additional_blocking_application in (self.env['aacp_matched_json']\n ['additional_blocking_applications']):\n self.env['aacp_blocking_applications'].append(additional_blocking_application)\n self.env['aacp_blocking_applications'] = (\n sorted(set(self.env['aacp_blocking_applications'])))\n self.output(f\"aacp_blocking_applications updated: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # Now we have the deets, let's use them\n self.create_pkginfo()\n\n\n def create_pkginfo(self):\n '''\n Create pkginfo with found details\n '''\n\n # var declaration\n pkginfo = {}\n\n # Set pkginfo variables\n if (self.env['aacp_application_architecture_type'] and\n not self.env['aacp_application_architecture_type'] == 'macuniversal'):\n pkginfo['supported_architectures'] = [self.env['aacp_application_architecture_type']]\n\n if self.env['aacp_application_description']:\n pkginfo['description'] = self.env['aacp_application_description']\n", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: 
{self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],", "type": "random" }, { "content": "#!/usr/local/autopkg/python\n# pylint: disable = invalid-name\n'''\nCopyright (c) 2022, dataJAR Ltd. 
All rights reserved.\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither data JAR Ltd nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY\n DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nSUPPORT FOR THIS PROGRAM\n This program is distributed 'as is' by DATA JAR LTD.\n For more information or support, please utilise the following resources:\n http://www.datajar.co.uk\n\nDESCRIPTION\n\nGenerates installation information for Adobe Admin Console Packages\n'''\n\n# Standard Imports\nimport json\nimport os\nimport re\nimport xml\nfrom xml.etree import ElementTree\n\n\n# AutoPkg imports\n# pylint: disable = import-error\nfrom autopkglib import (Processor,\n ProcessorError)\n\n\n# Define class\n__all__ = ['AdobeAdminConsolePackagesPkgInfoCreator']\n__version__ = ['1.0']\n\n\n# Class def\nclass AdobeAdminConsolePackagesPkgInfoCreator(Processor):\n '''\n Parses generated Adobe Admin Console Packages to generate installation information.\n '''\n\n description = __doc__\n\n input_variables = {\n }\n\n output_variables = {\n 'aacp_application_path': {\n 'description': 'aacp_installdir_value after regex applied to get the path alone.',\n },\n 'aacp_application_architecture_type': {\n 'description': 'The architecture type for the title, either arm64 or x86_64',\n },\n 'aacp_application_install_lang': {\n 'description': 'The titles installation langauage.',\n },\n 'aacp_application_json_path': {\n 'description': 'Path to the tiles Application.json file.',\n },\n 'aacp_application_major_version': {\n 'description': 'The major version of the title.',\n },\n 'aacp_blocking_applications': {\n 'description': 'Sorted set of the conflicting processes.',\n },\n 'aacp_application_description': {\n 'description': 'Short description of the title.',\n },\n 'aacp_application_sap_code': {\n 'description': 'The titles sap code.',\n },\n 'aacp_install_pkg_path': {\n 'description': 'Path to the Adobe*_Install.pkg.',\n },\n 'aacp_json_path': {\n 'description': 'Path to AdobeAutoPkgApplicationData.json.',\n },\n 'aacp_matched_json': {\n 'description': ('dict from AdobeAutoPkgApplicationData.json, which matches the '\n '\"aacp_application_sap_code\" and \"aacp_application_major_version\".'),\n },\n 'aacp_option_xml_path': {\n 'description': 'Path to the tiles optionXML.xml file.',\n },\n 'aacp_parent_dir': {\n 
'description': 'Path to parent directory of this processor.',\n },\n 'aacp_proxy_xml_path': {\n 'description': 'Acrobat only, path to proxy.xml.',\n },\n 'aacp_target_folder': {\n 'description': 'The name of the folder within the pkg to check files for metadata.',\n },\n 'aacp_uninstall_pkg_path': {\n 'description': 'Path to the Adobe*_Uninstall.pkg.',\n },\n 'additional_pkginfo': {\n 'description':\n 'Additonal pkginfo fields extracted from the Adobe metadata.',\n },\n 'version': {\n 'description': 'The titles version.',\n }\n }\n\n\n def main(self):\n '''\n Find the Adobe*_Install.pkg in the Downloads dir based on the name, raise\n if corresponding *_Uninstall.pkg is missing.\n '''\n\n # Progress notification\n self.output(\"Starting versioner process...\")\n\n # Get set packages_path\n self.env['aacp_packages_path'] = os.path.expanduser('~/Downloads/')\n self.output(f\"aacp_packages_path: {self.env['aacp_packages_path']}\")\n\n # Check that packages_path exists\n if not os.path.exists(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: Cannot locate directory, \"\n f\"{self.env['aacp_packages_path']}... exiting...\")\n\n # Check that packages_path is a directory\n if not os.path.isdir(self.env['aacp_packages_path']):\n raise ProcessorError(f\"ERROR: {self.env['aacp_packages_path']} is a not a \"\n \"directory... exiting...\")\n\n # Path to Adobe*_Install.pkg\n self.env['aacp_install_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Install.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_install_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find \"\n f\"{self.env['aacp_install_pkg_path']}... exiting...\")\n self.output(f\"aacp_install_pkg_path: {self.env['aacp_install_pkg_path']}\")\n\n # Path to Adobe*_Uninstall.pkg\n self.env['aacp_uninstall_pkg_path'] = (os.path.join(self.env['aacp_packages_path'],\n self.env['NAME'], 'Build',\n self.env['NAME'] + '_Uninstall.pkg'))\n\n # Check that the path exists, raise if not\n if not os.path.exists(self.env['aacp_uninstall_pkg_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_uninstall_pkg_path']}, these \"\n f\"recipes need packages of the Managed Package variety, which \"\n f\"include _Install and _Uninstall packages.... exiting...\")\n self.output(f\"aacp_uninstall_pkg_path {self.env['aacp_uninstall_pkg_path']}\")\n\n # Process the titles optionXML.xml\n self.process_optionxml_xml()\n\n\n def process_optionxml_xml(self):\n '''\n Process the titles optionXML.xml\n '''\n\n # Var declaration\n self.env['aacp_application_install_lang'] = None\n\n # Path to titles optionXML.xml\n self.env['aacp_option_xml_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents', 'Resources', 'optionXML.xml')\n if not os.path.exists(self.env['aacp_option_xml_path']):\n raise ProcessorError(f\"ERROR: Cannot find {self.env['aacp_option_xml_path']}... 
\"\n \"exiting...\")\n self.output(f\"aacp_option_xml_path: {self.env['aacp_option_xml_path']}\")\n\n # Progress notification\n self.output(f\"Processing: {self.env['aacp_option_xml_path']}...\")\n\n # Try to parse option_xml, raise if an issue\n try:\n option_xml = ElementTree.parse(self.env['aacp_option_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Check to see if HDMedia keys set\n for hd_media in option_xml.findall('.//HDMedias/HDMedia'):\n # If we have HDMedia, set vars\n if hd_media.findtext('MediaType') == 'Product':\n self.env['aacp_application_install_lang'] = hd_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = hd_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = hd_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = hd_media.findtext('baseVersion')\n\n # If no HDMedia is found, then self.env['aacp_application_install_lang'] will be none\n if not self.env['aacp_application_install_lang']:\n # Get vars for RIBS media\n for ribs_media in option_xml.findall('.//Medias/Media'):\n self.env['aacp_application_install_lang'] = ribs_media.findtext('installLang')\n self.env['aacp_application_sap_code'] = ribs_media.findtext('SAPCode')\n self.env['aacp_target_folder'] = ribs_media.findtext('TargetFolderName')\n self.env['aacp_application_major_version'] = ribs_media.findtext('prodVersion')\n\n # Check for Processor Architecture\n self.env['aacp_application_architecture_type'] = (\n option_xml.findtext('ProcessorArchitecture').lower())\n if not self.env['aacp_application_architecture_type'] in ['arm64', 'macuniversal', 'x64']:\n raise ProcessorError(f\"architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']},\"\n f\" is neither arm64, macuniversal nor x64... 
exiting...\")\n if self.env['aacp_application_architecture_type'] == 'x64':\n self.env['aacp_application_architecture_type'] = 'x86_64'\n\n # Display progress\n self.output(f\"aacp_application_sap_code: {self.env['aacp_application_sap_code']}\")\n self.output(f\"aacp_target_folder: {self.env['aacp_target_folder']}\")\n self.output(f\"aacp_application_architecture_type: \"\n f\"{self.env['aacp_application_architecture_type']}\")\n self.output(f\"aacp_application_install_lang: {self.env['aacp_application_install_lang']}\")\n self.output(f\"aacp_application_major_version: {self.env['aacp_application_major_version']}\")\n\n # If the we're looking at Acrobat, then we need to process things differently\n if self.env['aacp_application_sap_code'] == 'APRO':\n self.process_apro_installer()\n else:\n # Set application_json_path\n self.env['aacp_application_json_path'] = os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/HD',\n self.env['aacp_target_folder'],\n 'Application.json')\n # Process HD installer\n self.process_hd_installer()\n\n\n def process_apro_installer(self):\n '''\n Process APRO (Acrobat) installer\n '''\n\n # Progress notification\n self.output(\"Processing Acrobat installer\")\n self.env['aacp_proxy_xml_path'] = (os.path.join(self.env['aacp_install_pkg_path'],\n 'Contents/Resources/Setup',\n self.env['aacp_target_folder'],\n 'proxy.xml'))\n self.output(f\"aacp_proxy_xml_path: {self.env['aacp_proxy_xml_path']}\")\n\n # Try to parse proxy_xml, raise if an issue\n try:\n parse_xml = ElementTree.parse(self.env['aacp_proxy_xml_path'])\n except xml.etree.ElementTree.ParseError as err_msg:\n raise ProcessorError from err_msg\n\n # Get root of xml\n root = parse_xml.getroot()\n\n # Get app_version\n self.env['version'] = (root.findtext\n ('./InstallerProperties/Property[@name=\\'ProductVersion\\']'))\n self.output(f\"version: {self.env['version']}\")\n\n # Set to []\n self.env['aacp_blocking_applications'] = []\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(None)\n\n\n def process_hd_installer(self):\n '''\n Process HD installer\n '''\n\n # Var declaration\n blocking_applications = []\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_application_json_path']}...\")\n\n # Read in app_json file\n with open(self.env['aacp_application_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n load_json = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get description\n tag_lines = load_json[\"ProductDescription\"][\"Tagline\"][\"Language\"]\n for tag_line in tag_lines:\n if tag_line['locale'] == self.env['aacp_application_install_lang']:\n self.env['aacp_application_description'] = tag_line['value']\n # Add a . 
if missing from the end\n if not self.env['aacp_application_description'].endswith('.'):\n self.env['aacp_application_description'] = (\n self.env['aacp_application_description'] + '.')\n self.output(f\"aacp_application_description: \"\n f\"{self.env['aacp_application_description']}\")\n\n # Get conflicting processes\n conflicting_processes = load_json['ConflictingProcesses']['ConflictingProcess']\n for conflicting_process in conflicting_processes:\n # Only add if forceKillAllowed is False\n if not conflicting_process['forceKillAllowed']:\n blocking_applications.append(conflicting_process['ProcessDisplayName'])\n if blocking_applications:\n self.env['aacp_blocking_applications'] = sorted(set(blocking_applications))\n self.output(f\"aacp_blocking_applications: \"\n f\"{self.env['aacp_blocking_applications']}\")\n\n # 2nd part of process\n self.process_adobe_autopkg_application_data(load_json)\n\n\n def process_adobe_autopkg_application_data(self, load_json):\n '''\n Get more details from AdobeAutoPkgApplicationData.json\n '''\n\n # var declaration\n self.env['aacp_matched_json'] = None\n\n # Get this scripts parent directory\n self.env['aacp_parent_dir'] = os.path.dirname(os.path.realpath(__file__))\n self.output(f\"aacp_parent_dir: {self.env['aacp_parent_dir']}\")\n\n # Get the path to AdobeAutoPkgApplicationData.json\n self.env['aacp_json_path'] = os.path.join(self.env['aacp_parent_dir'],\n 'AdobeAutoPkgApplicationData.json')\n self.output(f\"aacp_json_path: {self.env['aacp_json_path']}\")\n\n # Progress notification\n self.output(f\"Processing {self.env['aacp_json_path']}...\")\n\n # Read in AdobeAutoPkgApplicationData.json file\n with open(self.env['aacp_json_path'], encoding='utf-8') as json_file:\n # Try to parse app_json as json, raise if an issue\n try:\n self.env['aacp_autopkg_json'] = json.load(json_file)\n except json.JSONDecodeError as err_msg:\n raise ProcessorError from err_msg\n\n # Get applications dict from the json\n for application_data in self.env['aacp_autopkg_json']:\n if application_data['sap_code'] == self.env['aacp_application_sap_code']:\n for aacp_version_json in application_data['versions'].keys():\n try:\n if aacp_version_json == self.env['aacp_application_major_version']:\n self.env['aacp_matched_json'] = (application_data['versions']\n [self.env['aacp_application_major_version']])\n self.output(f\"aacp_matched_json: {self.env['aacp_matched_json']}\")\n except KeyError as err_msg:\n raise ProcessorError(f\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: \"\n f\"{self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}... 
\"\n f\"exiting...\") from err_msg\n\n # If we found a match\n if self.env['aacp_matched_json']:\n self.process_matched_json(load_json)\n else:\n raise ProcessorError(\"Cannot find details for \"\n f\"{self.env['aacp_application_sap_code']} \"\n f\"with version: {self.env['aacp_application_major_version']},\"\n f\" in {self.env['aacp_json_path']}...\")\n\n\n def process_matched_json(self, load_json):\n '''\n Get metadata with the aid of self.env['aacp_matched_json']\n '''\n # Applications version, if not APRO\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['version'] = load_json[self.env['aacp_matched_json']['app_json_version_key']]\n self.output(f\"version: {self.env['version']}\")\n\n # If the version is unsupported\n if 'unsupported_versions_dict' in self.env['aacp_matched_json']:\n self.output(f\"unsupported_versions_dict: {self.env['aacp_matched_json']\"\n f\"['unsupported_versions_dict']}\")\n for unsupported_version in self.env['aacp_matched_json']['unsupported_versions_dict']:\n if unsupported_version == self.env['version']:\n raise ProcessorError(f\"{self.env['aacp_matched_json']['unsupported_versions_dict']\"\n f\"[unsupported_version]}\")\n\n # Applications bundle id\n self.env['aacp_application_bundle_id'] = self.env['aacp_matched_json']['app_bundle_id']\n self.output(f\"aacp_application_bundle_id: {self.env['aacp_application_bundle_id']}\")\n\n # Applications minimum os, if APRO get from self.env['aacp_matched_json']\n if not self.env['aacp_application_sap_code'] == 'APRO':\n self.env['aacp_application_minimum_os'] = (re.search(self.env['aacp_matched_json']\n ['minos_regex'],\n load_json['SystemRequirement']\n ['CheckCompatibility']\n ['Content'])[1])\n self.output(f\"aacp_application_minimum_os: {self.env['aacp_application_minimum_os']}\")\n else:", "type": "random" } ]
[ " self.create_pkginfo()", " self.replace_element(some_key)", " self.process_adobe_autopkg_application_data(load_json)", " self.process_optionxml_xml()", " self.process_apro_installer()", " self.process_hd_installer()", " PROCESSOR = AdobeAdminConsolePackagesPkgInfoCreator()", " self.process_adobe_autopkg_application_data(None)", " self.process_matched_json(load_json)", " self.env['aacp_autopkg_json'] = json.load(json_file)", " load_json = json.load(json_file)", "", " '''", " if not 'aacp_application_description' in self.env:", " input_variables = {", " if self.env['aacp_application_display_name']:", " 'proxy.xml'))", " self.env['aacp_application_minimum_os'] = self.env['aacp_matched_json']['minos_version']" ]
METASEP
35
vizzuhq__ipyvizzu
vizzuhq__ipyvizzu METASEP src/ipyvizzu/template.py METASEP """A module for storing the JavaScript templates.""" from enum import Enum class ChartProperty(Enum): """An enum class for storing chart properties.""" CONFIG = "config" """An enum key-value for storing config chart property.""" STYLE = "style" """An enum key-value for storing style chart property.""" class DisplayTarget(Enum): """An enum class for storing chart display options.""" BEGIN = "begin" """Display all animation steps after the constructor's cell.""" END = "end" """Display all animation steps after the last running cell.""" ACTUAL = "actual" """Display the actual animation step after the currently running cell.""" MANUAL = "manual" """Display all animation steps after calling a show method.""" class DisplayTemplate: """A class for storing JavaScript snippet templates.""" # pylint: disable=too-few-public-methods IPYVIZZUJS: str = "{ipyvizzujs}" """ipyvizzu JavaScript class.""" INIT: str = ( "window.ipyvizzu.createChart(element, " + "'{chart_id}', '{vizzu}', '{div_width}', '{div_height}');" ) """Call createChart JavaScript method.""" ANIMATE: str = ( "window.ipyvizzu.animate(element, " + "'{chart_id}', '{display_target}', {scroll}, " + "lib => {{ return {chart_target} }}, {chart_anim_opts});" ) """Call animate JavaScript method.""" FEATURE: str = ( "window.ipyvizzu.feature(element, '{chart_id}', '{name}', {enabled});" ) """Call feature JavaScript method.""" STORE: str = "window.ipyvizzu.store(element, '{chart_id}', '{id}');" """Call store JavaScript method.""" SET_EVENT: str = ( "window.ipyvizzu.setEvent(element, " + "'{chart_id}', '{id}', '{event}', event => {{ {handler} }});" ) """Call setEvent JavaScript method.""" CLEAR_EVENT: str = ( "window.ipyvizzu.clearEvent(element, '{chart_id}', '{id}', '{event}');" ) """Call clearEvent JavaScript method.""" LOG: str = "window.ipyvizzu.log(element, '{chart_id}', '{chart_property}');" """Call log JavaScript method.""" CLEAR_INHIBITSCROLL: str = ( "if (window.IpyVizzu) { window.IpyVizzu.clearInhibitScroll(element); }" ) """Call clearInhibitScroll JavaScript method if ipyvizzu JavaScript class exists.""" src/ipyvizzu/schema.py METASEP """A module for storing the data schema.""" NAMED_SCHEMA = { "type": "array", "items": { "type": "object", "properties": { "name": {"type": "string"}, "values": {"type": "array", "optional": True}, "type": {"type": "string", "optional": True}, }, "required": ["name"], }, } """`dict`: Store the schema of the `series`, `dimensions` and `measures` data types.""" RECORD_SCHEMA = {"type": "array", "items": {"type": "array"}} """`dict`: Store the schema of the `records` data type.""" DATA_SCHEMA = { "type": "object", "oneOf": [ { "properties": { "series": NAMED_SCHEMA, "records": RECORD_SCHEMA, "filter": {"optional": True}, }, "additionalProperties": False, }, { "properties": { "dimensions": NAMED_SCHEMA, "measures": NAMED_SCHEMA, "filter": {"optional": True}, }, "additionalProperties": False, "required": ["dimensions", "measures"], }, ], } """`dict`: Store the schema of the data animation.""" src/ipyvizzu/method.py METASEP """A module for working with template methods.""" import json from typing import Optional, Union from ipyvizzu.animation import PlainAnimation, Animation, AnimationMerger from ipyvizzu.event import EventHandler from ipyvizzu.template import ChartProperty class Method: """A class for storing and dumping any kind of data.""" # pylint: disable=too-few-public-methods _data: dict def dump(self) -> dict: """ A method for returning the stored 
data. Returns: The stored data. """ return self._data class Animate(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.ANIMATE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__( self, chart_target: Union[Animation, AnimationMerger], chart_anim_opts: Optional[dict] = None, ): """ Animate constructor. It stores and dumps `chart_target` and `chart_anim_opts` parameters. Args: chart_target: Animation object such as [Data][ipyvizzu.animation.Data] [Config][ipyvizzu.animation.Config] or [Style][ipyvizzu.animation.Style]. chart_anim_opts: Animation options' dictionary. If it is not set, it dumps `undefined`. """ self._data = { "chart_target": chart_target.dump(), "chart_anim_opts": PlainAnimation(chart_anim_opts).dump() if chart_anim_opts else "undefined", } class Feature(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.FEATURE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, name: str, enabled: bool): """ Feature constructor. It stores and dumps `name` and `enabled` parameters. Args: name: The name of a chart feature. enabled: The new state of a chart feature. """ self._data = {"name": name, "enabled": json.dumps(enabled)} class Store(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.STORE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, snapshot_id: str): """ Store constructor. It stores and dumps `snapshot_id` parameter. Args: snapshot_id: The id of snapshot object. """ self._data = {"id": snapshot_id} class EventOn(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.SET_EVENT][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, event_handler: EventHandler): """ EventOn constructor. It stores and dumps the `id`, the `event` and the `handler` of the event handler object. Args: event_handler: An event handler object. """ self._data = { "id": event_handler.id, "event": event_handler.event, "handler": event_handler.handler, } class EventOff(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.CLEAR_EVENT][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, event_handler: EventHandler): """ EventOff constructor. It stores and dumps the `id` and the `event` of the event handler object. Args: event_handler: An event handler object. """ self._data = {"id": event_handler.id, "event": event_handler.event} class Log(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.LOG][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, chart_property: ChartProperty): """ Log constructor. It stores and dumps the value of the chart property object. Args: chart_property: A chart property such as [CONFIG][ipyvizzu.template.ChartProperty] and [STYLE][ipyvizzu.template.ChartProperty]. """ self._data = {"chart_property": chart_property.value} src/ipyvizzu/json.py METASEP """A module for working JavaScript code in json convertible objects.""" import json from typing import Optional import uuid class RawJavaScript: """A class for representing raw JavaScript code.""" # pylint: disable=too-few-public-methods def __init__(self, raw: Optional[str]): """ RawJavaScript constructor. 
It stores raw JavaScript code as a string. Args: raw: JavaScript code as `str`. """ self._raw = raw @property def raw(self) -> Optional[str]: """ A property for storing raw JavaScript code as a string. Returns: Raw JavaScript code as `str`. """ return self._raw class RawJavaScriptEncoder(json.JSONEncoder): """ A class for representing a custom json encoder, it can encode objects that contain [RawJavaScript][ipyvizzu.json.RawJavaScript] values. """ def __init__(self, *args, **kwargs): """ RawJavaScriptEncoder constructor. It extends `json.JSONEncoder` with an instance variable (`_raw_replacements`). The `_raw_replacements` dictionary stores the `uuids` and JavaScript codes of the [RawJavaScript][ipyvizzu.json.RawJavaScript] objects. """ json.JSONEncoder.__init__(self, *args, **kwargs) self._raw_replacements = {} def default(self, o): """ Overrides `json.JSONEncoder.default` method. It replaces [RawJavaScript][ipyvizzu.json.RawJavaScript] object with `uuid` and it stores raw JavaScript code with `uuid` key in the `_raw_replacements` dictionary. """ if isinstance(o, RawJavaScript): key = uuid.uuid4().hex self._raw_replacements[key] = o.raw return key return json.JSONEncoder.default(self, o) def encode(self, o): """ Overrides `json.JSONEncoder.encode` method. It replaces `uuids` with raw JavaScript code without apostrophes. """ result = json.JSONEncoder.encode(self, o) for key, val in self._raw_replacements.items(): result = result.replace(f'"{key}"', val) return result src/ipyvizzu/event.py METASEP """A module for working with JavaScript events""" import uuid class EventHandler: """A class for representing an event handler.""" def __init__(self, event: str, handler: str): """ EventHandler constructor. It generates a uuid for the event handler, stores the event type and the body of the handler function. Args: event: The type of the event. handler: The body of the handler function. """ self._id = uuid.uuid4().hex[:7] self._event = event self._handler = " ".join(handler.split()) @property def id(self) -> str: # pylint: disable=invalid-name """ A property for storing an id. Returns: The uuid of the event handler. """ return self._id @property def event(self) -> str: """ A property for storing an event type. Returns: The type of the event. """ return self._event @property def handler(self) -> str: """ A property for storing an event handler function. Returns: The body of the handler function. """ return self._handler src/ipyvizzu/chart.py METASEP """A module for working with Vizzu charts.""" import pkgutil import uuid from typing import List, Optional, Union, Tuple from IPython.display import display_javascript # type: ignore from IPython import get_ipython # type: ignore from ipyvizzu.animation import Animation, Snapshot, AnimationMerger from ipyvizzu.method import Animate, Feature, Store, EventOn, EventOff, Log from ipyvizzu.template import ChartProperty, DisplayTarget, DisplayTemplate from ipyvizzu.event import EventHandler class Chart: """A class for representing a wrapper over Vizzu chart.""" VIZZU: str = "https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js" """A variable for storing the default url of vizzu package.""" def __init__( self, vizzu: Optional[str] = VIZZU, width: Optional[str] = "800px", height: Optional[str] = "480px", display: Optional[Union[DisplayTarget, str]] = DisplayTarget.ACTUAL, ): """ Chart constructor. Args: vizzu: The url of Vizzu JavaScript package. width: The width of the chart. height: The height of the chart. 
display: The display behaviour of the chart. """ self._chart_id = uuid.uuid4().hex[:7] self._display_target = DisplayTarget(display) self._calls: List[str] = [] self._showed = False self._scroll_into_view = False ipyvizzurawjs = pkgutil.get_data(__name__, "templates/ipyvizzu.js") ipyvizzujs = ipyvizzurawjs.decode("utf-8") # type: ignore self._display(DisplayTemplate.IPYVIZZUJS.format(ipyvizzujs=ipyvizzujs)) self._display( DisplayTemplate.INIT.format( chart_id=self._chart_id, vizzu=vizzu, div_width=width, div_height=height, ) ) if self._display_target != DisplayTarget.MANUAL: self._register_events() @staticmethod def _register_events() -> None: ipy = get_ipython() if ipy is not None: ipy.events.register("pre_run_cell", Chart._register_pre_run_cell) @staticmethod def _register_pre_run_cell() -> None: display_javascript(DisplayTemplate.CLEAR_INHIBITSCROLL, raw=True) @property def scroll_into_view(self) -> bool: """ A property for turning on/off the scroll into view feature. Returns: The value of the property (default `False`). """ return self._scroll_into_view @scroll_into_view.setter def scroll_into_view(self, scroll_into_view: Optional[bool]): self._scroll_into_view = bool(scroll_into_view) def animate( self, *animations: Animation, **options: Optional[Union[str, int, float, dict]] ) -> None: """ A method for changing the state of the chart. Args: *animations: List of Animation objects such as [Data][ipyvizzu.animation.Data], [Config][ipyvizzu.animation.Config] and [Style][ipyvizzu.animation.Style]. **options: Dictionary of animation options for example `duration=1`. Raises: ValueError: If `animations` is not set. Example: Reset the chart styles: chart.animate(Style(None)) """ if not animations: raise ValueError("No animation was set.") animation = self._merge_animations(animations) animate = Animate(animation, options) self._display( DisplayTemplate.ANIMATE.format( display_target=self._display_target.value, chart_id=self._chart_id, scroll=str(self._scroll_into_view).lower(), **animate.dump(), ) ) @staticmethod def _merge_animations( animations: Tuple[Animation, ...], ) -> Union[Animation, AnimationMerger]: if len(animations) == 1: return animations[0] merger = AnimationMerger() for animation in animations: merger.merge(animation) return merger def feature(self, name: str, enabled: bool) -> None: """ A method for turning on/off features of the chart. Args: name: The name of the chart feature. enabled: The new state of the chart feature. Example: Turn on `tooltip` of the chart: chart.feature("tooltip", True) """ self._display( DisplayTemplate.FEATURE.format( chart_id=self._chart_id, **Feature(name, enabled).dump(), ) ) def store(self) -> Snapshot: """ A method for saving and storing the actual state of the chart. Returns: A snapshot animation object wich stores the actual state of the chart. Example: Save and restore the actual state of the chart: snapshot = chart.store() ... chart.animate(snapshot) """ snapshot_id = uuid.uuid4().hex[:7] self._display( DisplayTemplate.STORE.format( chart_id=self._chart_id, **Store(snapshot_id).dump() ) ) return Snapshot(snapshot_id) def on( # pylint: disable=invalid-name self, event: str, handler: str ) -> EventHandler: """ A method for creating and turning on an event handler. Args: event: The type of the event. handler: The JavaScript method of the event. Returns: The turned on event handler object. 
Example: Turn on an event handler which prints an alert message when someone clicks on the chart: handler = chart.on("click", "alert(JSON.stringify(event.data));") """ event_handler = EventHandler(event, handler) self._display( DisplayTemplate.SET_EVENT.format( chart_id=self._chart_id, **EventOn(event_handler).dump(), ) ) return event_handler def off(self, event_handler: EventHandler) -> None: """ A method for turning off an event handler. Args: event_handler: A previously created event handler object. Example: Turn off a previously created event handler: chart.off(handler) """ self._display( DisplayTemplate.CLEAR_EVENT.format( chart_id=self._chart_id, **EventOff(event_handler).dump(), ) ) def log(self, chart_property: ChartProperty) -> None: """ A method for printing chart properties to the browser console. Args: chart_property: A chart property such as [CONFIG][ipyvizzu.template.ChartProperty] and [STYLE][ipyvizzu.template.ChartProperty]. Example: Log the actual style of the chart to the browser console: chart.log(ChartProperty.STYLE) """ self._display( DisplayTemplate.LOG.format( chart_id=self._chart_id, **Log(chart_property).dump() ) ) def _repr_html_(self) -> str: assert ( self._display_target == DisplayTarget.MANUAL ), "chart._repr_html_() can be used with display=DisplayTarget.MANUAL only" assert not self._showed, "cannot be used after chart displayed." self._showed = True html_id = uuid.uuid4().hex[:7] script = ( self._calls[0] + "\n" + "\n".join(self._calls[1:]).replace( "element", f'document.getElementById("{html_id}")' ) ) return f'<div id="{html_id}"><script>{script}</script></div>' def show(self) -> None: """ A method for displaying the assembled JavaScript code. Raises: AssertionError: If [display][ipyvizzu.Chart.__init__] is not [DisplayTarget.MANUAL][ipyvizzu.template.DisplayTarget]. AssertionError: If chart already has been displayed. """ assert ( self._display_target == DisplayTarget.MANUAL ), "chart.show() can be used with display=DisplayTarget.MANUAL only" assert not self._showed, "cannot be used after chart displayed" display_javascript( "\n".join(self._calls), raw=True, ) self._showed = True def _display(self, javascript: str) -> None: if self._display_target != DisplayTarget.MANUAL: display_javascript( javascript, raw=True, ) else: assert not self._showed, "cannot be used after chart displayed" self._calls.append(javascript) src/ipyvizzu/animation.py METASEP """A module for working with chart animations.""" import abc from enum import Enum from os import PathLike import json from typing import Optional, Union, List, Any import jsonschema # type: ignore import pandas as pd # type: ignore from pandas.api.types import is_numeric_dtype # type: ignore from ipyvizzu.json import RawJavaScript, RawJavaScriptEncoder from ipyvizzu.schema import DATA_SCHEMA class Animation: """ An abstract class for representing animation objects that have `dump` and `build` methods. """ def dump(self) -> str: """ A method for converting the built dictionary into string. Returns: An str that has been json dumped with [RawJavaScriptEncoder][ipyvizzu.json.RawJavaScriptEncoder] from a dictionary. """ return json.dumps(self.build(), cls=RawJavaScriptEncoder) @abc.abstractmethod def build(self) -> dict: """ An abstract method for returning a dictionary with values that can be converted into json string. Returns: A dictionary that stored in the animation object. """ class PlainAnimation(dict, Animation): """ A class for representing plain animation. It can build any dictionary. 
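
    Example:
        Wrap an arbitrary dictionary, for example a set of animation options
        (the option key below is illustrative only):

            anim = PlainAnimation({"duration": 1})  # any dict is accepted
            anim.build()  # returns the wrapped dictionary itself
            anim.dump()   # returns the dictionary as a json string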
""" def build(self) -> dict: """ A method for returning the plain animation dictionary. Returns: A dictionary that stored in the plain animation object. """ return self class InferType(Enum): """An enum class for storing data infer types.""" DIMENSION = "dimension" """An enum key-value for storing dimension infer type.""" MEASURE = "measure" """An enum key-value for storing measure infer type.""" class Data(dict, Animation): """ A class for representing data animation. It can build data option of the chart. """ @classmethod def filter(cls, filter_expr: Optional[str] = None): # -> Data: """ A class method for creating a [Data][ipyvizzu.animation.Data] class instance with a data filter. Args: filter_expr: The JavaScript data filter expression. Returns: (Data): A data animation instance that contains a data filter. Example: Create a [Data][ipyvizzu.animation.Data] class with a data filter: filter = Data.filter("record['Genres'] == 'Pop'") """ data = cls() data.set_filter(filter_expr) return data def set_filter(self, filter_expr: Optional[str] = None) -> None: """ A method for adding a filter to an existing [Data][ipyvizzu.animation.Data] class instance. Args: filter_expr: The JavaScript data filter expression. Example: Add a data filter to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.set_filter("record['Genres'] == 'Pop'") """ filter_expr_raw_js = ( RawJavaScript(f"record => {{ return ({' '.join(filter_expr.split())}) }}") if filter_expr is not None else filter_expr ) self.update({"filter": filter_expr_raw_js}) @classmethod def from_json(cls, filename: Union[str, bytes, PathLike]): # -> Data: """ A method for returning a [Data][ipyvizzu.animation.Data] class instance which has been created from a json file. Args: filename: The path of the data source json file. Returns: (Data): A data animation instance that has been created from a json file. """ with open(filename, "r", encoding="utf8") as file_desc: return cls(json.load(file_desc)) def add_record(self, record: list) -> None: """ A method for adding a record to an existing [Data][ipyvizzu.animation.Data] class instance. Args: record: A list that contains data values. Example: Adding a record to a [Data][ipyvizzu.animation.Data] class instance: data = Data() record = ["Pop", "Hard", 114] data.add_record(record) """ self._add_value("records", record) def add_records(self, records: List[list]) -> None: """ A method for adding records to an existing [Data][ipyvizzu.animation.Data] class instance. Args: records: A list that contains data records. Example: Adding records to a [Data][ipyvizzu.animation.Data] class instance: data = Data() records = [ ["Pop", "Hard", 114], ["Rock", "Hard", 96], ["Pop", "Experimental", 127], ["Rock", "Experimental", 83], ] data.add_records(records) """ list(map(self.add_record, records)) def add_series(self, name: str, values: Optional[list] = None, **kwargs) -> None: """ A method for adding a series to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the series. values: The data values of the series. **kwargs (Optional): Arbitrary keyword arguments. For example infer type can be set with the `type` keywod argument. 
Example: Adding a series without values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series("Genres") Adding a series without values and with infer type to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series("Kinds", type="dimension") Adding a series with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series( "Popularity", [114, 96, 127, 83] ) """ self._add_named_value("series", name, values, **kwargs) def add_dimension(self, name: str, values: Optional[list] = None, **kwargs) -> None: """ A method for adding a dimension to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the dimension. values: The data values of the dimension. **kwargs (Optional): Arbitrary keyword arguments. Example: Adding a dimension with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_dimension("Genres", ["Pop", "Rock"]) """ self._add_named_value("dimensions", name, values, **kwargs) def add_measure(self, name: str, values: Optional[list] = None, **kwargs) -> None: """ A method for adding a measure to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the measure. values: The data values of the measure. **kwargs (Optional): Arbitrary keyword arguments. Example: Adding a measure with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_measure( "Popularity", [ [114, 96], [127, 83], ], ) """ self._add_named_value("measures", name, values, **kwargs) def add_data_frame( self, data_frame: Union[pd.DataFrame, pd.core.series.Series], default_measure_value: Optional[Any] = 0, default_dimension_value: Optional[Any] = "", ) -> None: """ A method for adding data frame to an existing [Data][ipyvizzu.animation.Data] class instance. Args: data_frame: The pandas data frame object. default_measure_value: The default measure value to fill the empty values. default_dimension_value: The default dimension value to fill the empty values. Raises: TypeError: If `data_frame` is not instance of `pandas.DataFrame` or `pandas.Series`. Example: Adding a data frame to a [Data][ipyvizzu.animation.Data] class instance: data_frame = pd.DataFrame( { "Genres": ["Pop", "Rock", "Pop", "Rock"], "Kinds": ["Hard", "Hard", "Experimental", "Experimental"], "Popularity": [114, 96, 127, 83], } ) data = Data() data.add_data_frame(data_frame) """ if not isinstance(data_frame, type(None)): if isinstance(data_frame, pd.core.series.Series): data_frame = pd.DataFrame(data_frame) if not isinstance(data_frame, pd.DataFrame): raise TypeError( "data_frame must be instance of pandas.DataFrame or pandas.Series" ) for name in data_frame.columns: values = [] if is_numeric_dtype(data_frame[name].dtype): infer_type = InferType.MEASURE values = ( data_frame[name] .fillna(default_measure_value) .astype(float) .values.tolist() ) else: infer_type = InferType.DIMENSION values = ( data_frame[name] .fillna(default_dimension_value) .astype(str) .values.tolist() ) self.add_series( name, values, type=infer_type.value, ) def add_data_frame_index( self, data_frame: Union[pd.DataFrame, pd.core.series.Series], name: Optional[str], ) -> None: """ A method for adding data frame's index to an existing [Data][ipyvizzu.animation.Data] class instance. Args: data_frame: The pandas data frame object. name: The name of the index series. Raises: TypeError: If `data_frame` is not instance of `pandas.DataFrame` or `pandas.Series`. 
Example: Adding a data frame's index to a [Data][ipyvizzu.animation.Data] class instance: data_frame = pd.DataFrame( {"Popularity": [114, 96]}, index=["x", "y"] ) data = Data() data.add_data_frame_index(data_frame, "DataFrameIndex") data.add_data_frame(data_frame) """ if data_frame is not None: if isinstance(data_frame, pd.core.series.Series): data_frame = pd.DataFrame(data_frame) if not isinstance(data_frame, pd.DataFrame): raise TypeError( "data_frame must be instance of pandas.DataFrame or pandas.Series" ) self.add_series( str(name), [str(i) for i in data_frame.index], type=InferType.DIMENSION.value, ) def _add_named_value( self, dest: str, name: str, values: Optional[list] = None, **kwargs ) -> None: value = {"name": name, **kwargs} if values is not None: value["values"] = values # type: ignore self._add_value(dest, value) def _add_value(self, dest: str, value: Union[dict, list]) -> None: self.setdefault(dest, []).append(value) def build(self) -> dict: """ A method for validating and returning the data animation dictionary. Returns: A dictionary that stored in the data animation object. It contains a `data` key whose value is the stored animation. """ jsonschema.validate(self, DATA_SCHEMA) return {"data": self} class ConfigAttr(type): """ A metaclass class for the [Config][ipyvizzu.animation.Config] class. Returns a [Config][ipyvizzu.animation.Config] class with a chart preset if the `__getattr__` method called. """ @classmethod def __getattr__(cls, name): config_attr = cls("ConfigAttr", (object,), {"name": name}) return config_attr._get_preset # pylint: disable=no-member def _get_preset(cls, preset): config = Config(RawJavaScript(f"lib.presets.{cls.name}({preset})")) return config class Config(Animation, metaclass=ConfigAttr): """ A class for representing config animation. It can build config option of the chart. """ def __init__(self, data: Optional[dict]): """ Config constructor. Args: data: A config animation dictionary. """ self._data = data def build(self) -> dict: """ A method for returning the config animation dictionary. Returns: A dictionary that stored in the config animation object. It contains a `config` key whose value is the stored animation. """ return {"config": self._data} class Style(Animation): """ A class for representing style animation. It can build style option of the chart. """ def __init__(self, data: Optional[dict]): """ Style constructor. Args: data: A style animation dictionary. """ self._data = data def build(self) -> dict: """ A method for returning the style animation dictionary. Returns: A dictionary that stored in the style animation object. It contains a `style` key whose value is the stored animation. """ return {"style": self._data} class Snapshot(Animation): """ A class for representing snapshot animation. It can build the snapshot id of the chart. """ def __init__(self, name: str): """ Snapshot constructor. Args: name: A snapshot id. """ self._name = name def dump(self) -> str: """ A method for overwriting the [Animation.build][ipyvizzu.animation.Animation.build] method. It dumps the stored snapshot id as a string. Returns: An str that contains the stored snapshot id. """ return f"'{self._name}'" def build(self): """ A method for preventing to merge [Snapshot][ipyvizzu.animation.Snapshot] with other animations. Raises: NotImplementedError: If the [build][ipyvizzu.animation.Snapshot.build] method has been called, because [Snapshot][ipyvizzu.animation.Snapshot] cannot be merged with other animations. 
""" raise NotImplementedError("Snapshot cannot be merged with other animations") class AnimationMerger(dict, Animation): """A class for merging different types of animations.""" def merge(self, animation: Animation) -> None: """ A method for merging an animation with the previously merged animations. Args: animation: An animation to be merged with with previously merged animations. Raises: ValueError: If the type of an animation is already merged. """ data = self._validate(animation) self.update(data) def _validate(self, animation: Animation) -> dict: data = animation.build() common_keys = set(data).intersection(self) if common_keys: raise ValueError(f"Animation is already merged: {common_keys}") return data def build(self) -> dict: """ A method for returning a merged dictionary from different types of animations. Returns: A merged dictionary from [Data][ipyvizzu.animation.Data], [Config][ipyvizzu.animation.Config] and [Style][ipyvizzu.animation.Style] animations. """ return self src/ipyvizzu/__init__.py METASEP """ Build animated charts in Jupyter Notebook and in many other environments with a simple Python syntax. """ from .chart import Chart from .animation import ( Animation, PlainAnimation, InferType, Data, Config, Style, Snapshot, AnimationMerger, ) from .method import Method, Animate, Feature, Store, EventOn, EventOff, Log from .json import RawJavaScript, RawJavaScriptEncoder from .template import ChartProperty, DisplayTarget, DisplayTemplate from .event import EventHandler tests/test_method.py METASEP """A module for testing the ipyvizzu.method module.""" import unittest from ipyvizzu import ( Method, Animate, Feature, Store, EventOn, EventOff, Log, Config, Style, Snapshot, AnimationMerger, EventHandler, ChartProperty, ) class TestMethod(unittest.TestCase): """A class for testing Method class and its derived classes.""" def test_method(self) -> None: """ A method for testing Method.dump method return value. Raises: AssertionError: If AttributeError is not occurred. """ method = Method() with self.assertRaises(AttributeError): method.dump() def test_animate_with_anim_without_option(self) -> None: """ A method for testing Animate class which is initialized with an Animation as `chart_target` and without `chart_anim_opts` parameters. It tests Animate.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ animation = Snapshot(name="abc1234") method = Animate(chart_target=animation) self.assertEqual( { "chart_target": "'abc1234'", "chart_anim_opts": "undefined", }, method.dump(), ) def test_animate_with_animmerger_without_option(self) -> None: """ A method for testing Animate class which is initialized with an AnimationMerger as `chart_target` and without `chart_anim_opts` parameters. It tests Animate.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ config = Config({"title": "My first chart"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) animation_merger = AnimationMerger() animation_merger.merge(config) animation_merger.merge(style) method = Animate(chart_target=animation_merger) self.assertEqual( { "chart_target": '{"config": ' + '{"title": "My first chart"}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}}', "chart_anim_opts": "undefined", }, method.dump(), ) def test_animate_with_anim_with_option(self) -> None: """ A method for testing Animate class which is initialized with an Animation as `chart_target` and with `chart_anim_opts` parameters. It tests Animate.dump method return value. 
Raises: AssertionError: If the dumped value is not correct. """ animation = Snapshot(name="abc1234") option = {"duration": 1, "easing": "linear"} method = Animate(chart_target=animation, chart_anim_opts=option) self.assertEqual( { "chart_target": "'abc1234'", "chart_anim_opts": '{"duration": 1, "easing": "linear"}', }, method.dump(), ) def test_animate_with_animmerger_with_option(self) -> None: """ A method for testing Animate class which is initialized with an AnimationMerger as `chart_target` and with `chart_anim_opts` parameters. It tests Animate.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ config = Config({"title": "My first chart"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) animation_merger = AnimationMerger() animation_merger.merge(config) animation_merger.merge(style) option = {"duration": 1, "easing": "linear"} method = Animate(chart_target=animation_merger, chart_anim_opts=option) self.assertEqual( { "chart_target": '{"config": ' + '{"title": "My first chart"}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}}', "chart_anim_opts": '{"duration": 1, "easing": "linear"}', }, method.dump(), ) def test_feature(self) -> None: """ A method for testing Feature class which is initialized with `name` and `enabled` parameters. It tests Feature.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ method = Feature(name="tooltip", enabled=True) self.assertEqual({"name": "tooltip", "enabled": "true"}, method.dump()) def test_store(self) -> None: """ A method for testing Store class which is initialized with `snapshot_id` parameter. It tests Store.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ method = Store(snapshot_id="abc1234") self.assertEqual({"id": "abc1234"}, method.dump()) def test_event_on(self) -> None: """ A method for testing EventOn class which is initialized with `event_handler` parameter. It tests EventOn.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ event_handler = EventHandler( event="click", handler="alert(JSON.stringify(event.data));" ) method = EventOn(event_handler=event_handler) method_dump = method.dump() self.assertEqual( { "id": method_dump["id"], "event": "click", "handler": "alert(JSON.stringify(event.data));", }, method_dump, ) def test_event_off(self) -> None: """ A method for testing EventOff class which is initialized with `event_handler` parameter. It tests EventOff.dump method return value. Raises: AssertionError: If the dumped value is not correct. """ event_handler = EventHandler( event="click", handler="alert(JSON.stringify(event.data));" ) method = EventOff(event_handler=event_handler) method_dump = method.dump() self.assertEqual( { "id": method_dump["id"], "event": "click", }, method_dump, ) def test_log(self) -> None: """ A method for testing Log class which is initialized with `chart_property` parameter. It tests Log.dump method return value. Raises: AssertionError: If the dumped value is not correct. 
""" method = Log(chart_property=ChartProperty.CONFIG) self.assertEqual( { "chart_property": "config", }, method.dump(), ) tests/test_json.py METASEP """A module for testing the ipyvizzu.json module.""" import json import unittest from ipyvizzu import RawJavaScriptEncoder, RawJavaScript class TestRawJavaScriptEncoder(unittest.TestCase): """A class for testing RawJavaScriptEncoder class.""" def test_encoder_with_rawjavascript(self) -> None: """ A method for testing RawJavaScriptEncoder with RawJavaScript object. Raises: AssertionError: If the dumped value is not correct. """ raw_javascript = RawJavaScript("null") self.assertEqual( json.dumps({"test": raw_javascript}, cls=RawJavaScriptEncoder), '{"test": null}', ) def test_encoder_with_not_rawjavascript(self) -> None: """ A method for testing RawJavaScriptEncoder with NotRawJavaScript object. Raises: AssertionError: If TypeError is not occurred. """ class NotRawJavaScript: """A class for representing a custom object which is not RawJavaScript.""" # pylint: disable=too-few-public-methods def __init__(self): """NotRawJavaScript constructor.""" not_raw_javascript = NotRawJavaScript() with self.assertRaises(TypeError): json.dumps({"test": not_raw_javascript}, cls=RawJavaScriptEncoder) tests/test_chart.py METASEP """A module for testing the ipyvizzu.chart module.""" import abc import unittest import unittest.mock from typing import Callable from normalizer import Normalizer from ipyvizzu import Chart, ChartProperty, Data, Config, Snapshot, Style, EventHandler class TestChart(unittest.TestCase, abc.ABC): """ An abstract class for testing Chart class. It is responsible for setup and teardown. """ normalizer: Normalizer @classmethod def setUpClass(cls) -> None: cls.normalizer = Normalizer() def setUp(self) -> None: self.patch = unittest.mock.patch(self.mock) self.trash = self.patch.start() self.chart = Chart() def tearDown(self) -> None: self.patch.stop() @property def mock(self) -> str: """ A property for storing the method's name that needs to be mocked. Returns: The mocked method's name. """ return "ipyvizzu.chart.display_javascript" class TestChartInit(TestChart): """ A class for testing Chart class. It tests the constructor. """ def test_init(self) -> None: """ A method for testing the default constructor parameters. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: Chart() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');", ) def test_init_vizzu(self) -> None: """ A method for testing the "vizzu" constructor parameter. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: Chart(vizzu="https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js") self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');", ) def test_init_div(self) -> None: """ A method for testing the "width" and "height" constructor parameters. Raises: AssertionError: If the normalized output is not correct. 
""" with unittest.mock.patch(self.mock) as output: Chart(width="400px", height="240px") self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'400px', '240px');", ) def test_init_display_invalid(self) -> None: """ A method for testing the "display" constructor parameter (display=invalid). Raises: AssertionError: If ValueError is not occurred. """ with self.assertRaises(ValueError): Chart(display="invalid") def test_init_display_begin(self) -> None: """ A method for testing the "display" constructor parameter (display=begin). Raises: AssertionError: If the normalized output is not correct. """ self.chart = Chart(display="begin") with unittest.mock.patch(self.mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'begin', false, " + "lib => { return id }, " + "undefined);", ) def test_init_display_actual(self) -> None: """ A method for testing the "display" constructor parameter (display=actual). Raises: AssertionError: If the normalized output is not correct. """ self.chart = Chart(display="actual") with unittest.mock.patch(self.mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + "lib => { return id }, " + "undefined);", ) def test_init_display_end(self) -> None: """ A method for testing the "display" constructor parameter (display=end). Raises: AssertionError: If the normalized output is not correct. """ self.chart = Chart(display="end") with unittest.mock.patch(self.mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'end', false, " + "lib => { return id }, " + "undefined);", ) def test_init_register_events(self) -> None: """ A method for testing Chart._register_events method. Raises: AssertionError: If the normalized output is not correct. """ class IPyEvents: """A class for mocking get_ipython.events object.""" # pylint: disable=too-few-public-methods @staticmethod def register(event: str, function: Callable) -> None: """A method for mocking get_ipython.events.register method.""" # pylint: disable=unused-argument function() class IPy: """A class for mocking get_ipython object.""" # pylint: disable=too-few-public-methods events = IPyEvents get_ipython_mock = "ipyvizzu.chart.get_ipython" with unittest.mock.patch(get_ipython_mock, return_value=IPy()): with unittest.mock.patch(self.mock) as output: Chart() self.assertEqual( self.normalizer.normalize_output(output, 2), "if (window.IpyVizzu) { window.IpyVizzu.clearInhibitScroll(element); }", ) class TestChartMethods(TestChart): """ A class for testing Chart class. It tests the ipyvizzu.method related methods. """ def test_animate_chart_target_has_to_be_passed(self) -> None: """ A method for testing Chart.animate method. It raises an error if has ben called without chart target. Raises: AssertionError: If ValueError is not occurred. """ with self.assertRaises(ValueError): self.chart.animate() def test_animate_chart_target_has_to_be_passed_even_if_chart_anim_opts_passed( self, ) -> None: """ A method for testing Chart.animate method. It raises an error if has ben called with anim options only. Raises: AssertionError: If ValueError is not occurred. 
""" with self.assertRaises(ValueError): self.chart.animate(duration="500ms") def test_animate_one_chart_target(self) -> None: """ A method for testing Chart.animate method. It tests with chart target. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) self.chart.animate(data) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + "undefined);", ) def test_animate_one_chart_target_with_chart_anim_opts(self) -> None: """ A method for testing Chart.animate method. It tests with chart target and anim options. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) self.chart.animate(data, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + '{"duration": "500ms"});', ) def test_animate_snapshot_chart_target(self) -> None: """ A method for testing Chart.animate method. It tests with Snapshot chart target. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: snapshot = Snapshot("abc1234") self.chart.animate(snapshot) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + "lib => { return id }, " + "undefined);", ) def test_animate_snapshot_chart_target_with_chart_anim_opts(self) -> None: """ A method for testing Chart.animate method. It tests with Snapshot chart target and anim options. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: snapshot = Snapshot("abc1234") self.chart.animate(snapshot, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + "lib => { return id }, " + '{"duration": "500ms"});', ) def test_animate_more_chart_target(self) -> None: """ A method for testing Chart.animate method. It tests with multiple chart targets. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config, style) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + "undefined);", ) def test_animate_more_chart_target_with_chart_anim_opts(self) -> None: """ A method for testing Chart.animate method. It tests with multiple chart targets and anim options. Raises: AssertionError: If the normalized output is not correct. 
""" with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config, style, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + '{"duration": "500ms"});', ) def test_animate_more_chart_target_with_conflict(self) -> None: """ A method for testing Chart.animate method. It tests with same types of chart target. Raises: AssertionError: If the normalized output is not correct. """ data = Data() data.add_record(["Rock", "Hard", 96]) config1 = Config({"channels": {"label": {"attach": ["Popularity"]}}}) config2 = Config({"title": "Test"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) with self.assertRaises(ValueError): self.chart.animate(data, config1, style, config2) def test_animate_more_chart_target_with_snapshot(self) -> None: """ A method for testing Chart.animate method. It raises an error if has ben called with multiple chart targets and Snapshot chart target. Raises: AssertionError: If NotImplementedError is not occurred. """ data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) snapshot = Snapshot("abc1234") with self.assertRaises(NotImplementedError): self.chart.animate(data, config, style, snapshot) def test_animate_more_calls(self) -> None: """ A method for testing Chart.animate method. It tests multiple calls. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config1 = Config({"channels": {"label": {"attach": ["Popularity"]}}}) config2 = Config({"title": "Test"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config1, style) self.chart.animate(config2) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + "undefined);\n" + "window.ipyvizzu.animate(element, id, 'actual', false, " + 'lib => { return {"config": {"title": "Test"}} }, ' + "undefined);", ) def test_animate_with_not_default_scroll_into_view(self) -> None: """ A method for testing Chart.animate method. It tests with "scroll_into_view=True". Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) scroll_into_view = not self.chart.scroll_into_view self.chart.scroll_into_view = scroll_into_view self.chart.animate(data) self.assertEqual( self.normalizer.normalize_output(output), f"window.ipyvizzu.animate(element, id, 'actual', {str(scroll_into_view).lower()}, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + "undefined);", ) def test_feature(self) -> None: """ A method for testing Chart.feature method. Raises: AssertionError: If the normalized output is not correct. 
""" with unittest.mock.patch(self.mock) as output: self.chart.feature("tooltip", True) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.feature(element, id, 'tooltip', true);", ) def test_store(self) -> None: """ A method for testing Chart.store method. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: self.chart.store() self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.store(element, id, id);", ) class TestChartEvents(TestChart): """ A class for testing Chart class. It tests the event related methods. """ def test_on(self) -> None: """ A method for testing Chart.on method. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: handler_method = """event.renderingContext.fillStyle = (event.data.text === 'Jazz') ? 'red' : 'gray';""" self.chart.on("plot-axis-label-draw", handler_method) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.setEvent(" + "element, id, id, 'plot-axis-label-draw', " + "event => " + "{ event.renderingContext.fillStyle = " + "(event.data.text === 'Jazz') ? 'red' : 'gray'; });", ) def test_off(self) -> None: """ A method for testing Chart.off method. Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: handler_method = "alert(JSON.stringify(event.data));" handler = EventHandler("click", handler_method) self.chart.off(handler) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.clearEvent(element, id, id, 'click');", ) class TestChartLogs(TestChart): """ A class for testing Chart class. It tests the log related methods. """ def test_log_config(self) -> None: """ A method for testing Chart.log method (ChartProperty.CONFIG). Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: self.chart.log(ChartProperty.CONFIG) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.log(element, id, 'config');", ) def test_log_style(self) -> None: """ A method for testing Chart.log method (ChartProperty.STYLE). Raises: AssertionError: If the normalized output is not correct. """ with unittest.mock.patch(self.mock) as output: self.chart.log(ChartProperty.STYLE) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.log(element, id, 'style');", ) def test_log_invalid(self) -> None: """ A method for testing Chart.log method with an invalid value. Raises: AssertionError: If AttributeError is not occurred. """ with self.assertRaises(AttributeError): self.chart.log(ChartProperty.INVALID) # type: ignore # pylint: disable=no-member class TestChartDisplay(TestChart): """ A class for testing Chart class. It tests the display related methods. """ def test_repr_html_if_display_is_not_manual(self) -> None: """ A method for testing Chart._repr_html_ method (display!=manual). Raises: AssertionError: If AssertionError is not occurred. """ self.chart.animate(Snapshot("abc1234")) with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_show_if_display_is_not_manual(self) -> None: """ A method for testing Chart.show method (display!=manual). Raises: AssertionError: If AssertionError is not occurred. 
""" self.chart.animate(Snapshot("abc1234")) with self.assertRaises(AssertionError): self.chart.show() def test_repr_html(self) -> None: """ A method for testing Chart._repr_html_ method (display=manual). Raises: AssertionError: If the normalized output is not correct. """ self.chart = Chart(display="manual") display_mock = "ipyvizzu.Chart._display" with unittest.mock.patch(display_mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.chart._showed, # pylint: disable=protected-access False, ) self.chart._repr_html_() # pylint: disable=protected-access self.assertEqual( self.chart._showed, # pylint: disable=protected-access True, ) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'manual', false, " + "lib => { return id }, " + "undefined);", ) def test_show(self) -> None: """ A method for testing Chart.show method (display=manual). Raises: AssertionError: If the normalized output is not correct. """ self.chart = Chart(display="manual") display_mock = "ipyvizzu.Chart._display" with unittest.mock.patch(display_mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.chart._showed, # pylint: disable=protected-access False, ) self.chart.show() self.assertEqual( self.chart._showed, # pylint: disable=protected-access True, ) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, 'manual', false, " + "lib => { return id }, " + "undefined);", ) def test_repr_html_after_repr_html(self) -> None: """ A method for testing Chart._repr_html_ method. It raises an error if has ben called after Chart._repr_html_. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_repr_html_after_show(self) -> None: """ A method for testing Chart._repr_html_ method. It raises an error if has ben called after Chart.show. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_show_after_show(self) -> None: """ A method for testing Chart.show method. It raises an error if has ben called after Chart.show. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.show() def test_show_after_repr_html(self) -> None: """ A method for testing Chart.show method. It raises an error if has ben called after Chart._repr_html_. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.show() def test_animate_after_repr_html(self) -> None: """ A method for testing Chart.animate method. It raises an error if has ben called after Chart._repr_html_. Raises: AssertionError: If AssertionError is not occurred. 
""" self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.animate(Snapshot("abc1234")) def test_animate_after_show(self) -> None: """ A method for testing Chart.animate method. It raises an error if has ben called after Chart.show. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.animate(Snapshot("abc1234")) def test_feature_after_repr_html(self) -> None: """ A method for testing Chart.feature method. It raises an error if has ben called after Chart._repr_html_. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.feature("tooltip", True) def test_feature_after_show(self) -> None: """ A method for testing Chart.feature method. It raises an error if has ben called after Chart.show. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.feature("tooltip", True) def test_store_after_repr_html_(self) -> None: """ A method for testing Chart.store method. It raises an error if has ben called after Chart._repr_html_. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.store() def test_store_after_show(self) -> None: """ A method for testing Chart.store method. It raises an error if has ben called after Chart.show. Raises: AssertionError: If AssertionError is not occurred. """ self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.store() tests/test_animation.py METASEP """A module for testing the ipyvizzu.animation module.""" import json import pathlib import unittest import jsonschema # type: ignore import pandas as pd # type: ignore from ipyvizzu import ( PlainAnimation, Data, Config, Style, Snapshot, AnimationMerger, ) class TestPlainAnimation(unittest.TestCase): """A class for testing PlainAnimation class.""" def test_plainanimation(self) -> None: """A method for testing PlainAnimation.build.""" animation = PlainAnimation(geometry="circle") self.assertEqual({"geometry": "circle"}, animation.build()) class TestDataSchema(unittest.TestCase): """ A class for testing Data class. It tests the data schema validation. """ def setUp(self) -> None: self.data = Data() def test_schema_dimension_only(self) -> None: """ A method for testing Data.build method. It raises an error if added dimension without measure. Raises: AssertionError: If jsonschema.ValidationError is not occurred. """ self.data.add_dimension("Genres", ["Pop", "Rock"]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_measure_only(self) -> None: """ A method for testing Data.build method. It raises an error if added measure without dimension. Raises: AssertionError: If jsonschema.ValidationError is not occurred. 
""" self.data.add_measure("Popularity", [[114, 96]]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_data_cube_and_series(self) -> None: """ A method for testing Data.build method. It raises an error if added both dimension/measure and series. Raises: AssertionError: If jsonschema.ValidationError is not occurred. """ self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_measure("Popularity", [[114, 96]]) self.data.add_series("Kinds", ["Hard"]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_data_cube_and_records(self) -> None: """ A method for testing Data.build method. It raises an error if added both dimension/measure and records. Raises: AssertionError: If jsonschema.ValidationError is not occurred. """ self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_measure("Popularity", [[114, 96]]) self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) with self.assertRaises(jsonschema.ValidationError): self.data.build() class TestDataClassmethods(unittest.TestCase): """ A class for testing Data class. It tests the classmethods. """ asset_dir: pathlib.Path @classmethod def setUpClass(cls) -> None: cls.asset_dir = pathlib.Path(__file__).parent / "assets" def test_filter(self) -> None: """ A method for testing Data.filter method with string. Raises: AssertionError: If the dumped value is not correct. """ data = Data.filter("filter_expr") # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": record => { return (filter_expr) }}}', data.dump(), ) def test_filter_multiline(self) -> None: """ A method for testing Data.filter method with multi-line string. Raises: AssertionError: If the dumped value is not correct. """ filter_expr = """ A && B || C """ data = Data.filter(filter_expr) # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": record => { return (A && B || C) }}}', data.dump(), ) def test_filter_can_be_none(self) -> None: """ A method for testing Data.filter method with None. Raises: AssertionError: If the dumped value is not correct. """ data = Data.filter(None) # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": null}}', data.dump(), ) def test_from_json(self) -> None: """ A method for testing Data.from_json method. Raises: AssertionError: If the built value is not correct. """ data = Data.from_json(self.asset_dir / "data_from_json.json") self.assertEqual( { "data": { "dimensions": [ {"name": "Genres", "values": ["Rock", "Pop"]}, {"name": "Kinds", "values": ["Hard"]}, ], "measures": [{"name": "Popularity", "values": [[114, 96]]}], } }, data.build(), ) class TestData(unittest.TestCase): """ A class for testing Data class. It tests the instance methods. """ asset_dir: pathlib.Path @classmethod def setUpClass(cls) -> None: cls.asset_dir = pathlib.Path(__file__).parent / "assets" def setUp(self) -> None: self.data = Data() def test_set_filter(self) -> None: """ A method for testing Data.set_filter method with string. Raises: AssertionError: If the dumped value is not correct. 
""" self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.data.set_filter("filter_expr") self.assertEqual( '{"data": {"records": ' + '[["Rock", "Hard", 96], ["Pop", "Hard", 114]], ' + '"filter": record => { return (filter_expr) }}}', self.data.dump(), ) def test_set_filter_can_be_none(self) -> None: """ A method for testing Data.set_filter method with None. Raises: AssertionError: If the dumped value is not correct. """ self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.data.set_filter(None) self.assertEqual( '{"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]], "filter": null}}', self.data.dump(), ) def test_record(self) -> None: """ A method for testing Data.add_record method. Raises: AssertionError: If the built value is not correct. """ self.data.add_record(["Rock", "Hard", 96]) self.data.add_record(["Pop", "Hard", 114]) self.assertEqual( {"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]]}}, self.data.build(), ) def test_records(self) -> None: """ A method for testing Data.add_records method. Raises: AssertionError: If the built value is not correct. """ self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.assertEqual( {"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]]}}, self.data.build(), ) def test_series(self) -> None: """ A method for testing Data.add_series method with values. Raises: AssertionError: If the built value is not correct. """ self.data.add_series("Genres", ["Rock", "Pop"], type="dimension") self.data.add_series("Kinds", ["Hard"]) self.data.add_series("Popularity", [96, 114], type="measure") self.assertEqual( { "data": { "series": [ { "name": "Genres", "type": "dimension", "values": ["Rock", "Pop"], }, {"name": "Kinds", "values": ["Hard"]}, {"name": "Popularity", "type": "measure", "values": [96, 114]}, ] } }, self.data.build(), ) def test_series_without_values(self) -> None: """ A method for testing Data.add_series method without values. Raises: AssertionError: If the built value is not correct. """ self.data.add_series("Genres", type="dimension") self.data.add_series("Kinds", type="dimension") self.data.add_series("Popularity", type="measure") records = [["Rock", "Hard", 96], ["Pop", "Hard", 114]] self.data.add_records(records) self.assertEqual( { "data": { "records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]], "series": [ {"name": "Genres", "type": "dimension"}, {"name": "Kinds", "type": "dimension"}, {"name": "Popularity", "type": "measure"}, ], } }, self.data.build(), ) def test_data_cube(self) -> None: """ A method for testing Data.add_dimension and Data.add_measure methods. Raises: AssertionError: If the built value is not correct. """ self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_dimension("Kinds", ["Hard"]) self.data.add_measure("Popularity", [[114, 96]]) self.assertEqual( { "data": { "dimensions": [ {"name": "Genres", "values": ["Pop", "Rock"]}, {"name": "Kinds", "values": ["Hard"]}, ], "measures": [ { "name": "Popularity", "values": [[114, 96]], } ], } }, self.data.build(), ) def test_data_frame_with_not_df(self) -> None: """ A method for testing Data.add_data_frame method. It raises an error if has been called with not valid dataframe. Raises: AssertionError: If TypeError is not occurred. """ data = Data() with self.assertRaises(TypeError): data.add_data_frame("") def test_data_frame_with_none(self) -> None: """ A method for testing Data.add_data_frame method. It tests with None. 
Raises: AssertionError: If the built value is not correct. """ data = Data() data.add_data_frame(None) self.assertEqual( {"data": {}}, data.build(), ) def test_data_frame(self) -> None: """ A method for testing Data.add_data_frame method. It tests with dataframe. Raises: AssertionError: If the built value is not correct. """ with open(self.asset_dir / "data_frame_in.json", encoding="UTF-8") as fh_in: fc_in = json.load(fh_in) with open(self.asset_dir / "data_frame_out.json", encoding="UTF-8") as fh_out: fc_out = json.load(fh_out) data_frame = pd.DataFrame(fc_in) data_frame = data_frame.astype({"PopularityAsDimension": str}) self.data.add_data_frame(data_frame) self.assertEqual( fc_out, self.data.build(), ) def test_data_frame_na(self) -> None: """ A method for testing Data.add_data_frame method. It tests with dataframe that contains na values. Raises: AssertionError: If the built value is not correct. """ data_frame = pd.read_csv( self.asset_dir / "data_frame_na.csv", dtype={"PopularityAsDimension": str} ) self.data.add_data_frame(data_frame) self.assertEqual( { "data": { "series": [ { "name": "Popularity", "type": "measure", "values": [100.0, 0.0], }, { "name": "PopularityAsDimension", "type": "dimension", "values": ["", "100"], }, ] } }, self.data.build(), ) def test_data_frame_with_pd_series(self) -> None: """ A method for testing Data.add_data_frame method. It tests with pd.Series. Raises: AssertionError: If the built value is not correct. """ data = Data() data.add_data_frame(pd.Series([1, 2], name="series1")) data.add_data_frame( pd.Series({"x": 3, "y": 4, "z": 5}, index=["x", "y"], name="series2") ) self.assertEqual( { "data": { "series": [ {"name": "series1", "type": "measure", "values": [1.0, 2.0]}, {"name": "series2", "type": "measure", "values": [3.0, 4.0]}, ] } }, data.build(), ) def test_data_frame_index_with_not_df(self) -> None: """ A method for testing Data.add_data_frame_index method. It raises an error if has been called with not valid dataframe. Raises: AssertionError: If TypeError is not occurred. """ data = Data() with self.assertRaises(TypeError): data.add_data_frame_index(data_frame="", name="") def test_data_frame_index_with_none_and_none(self) -> None: """ A method for testing Data.add_data_frame_index method. It tests with (None, None). Raises: AssertionError: If the built value is not correct. """ data = Data() data.add_data_frame_index(data_frame=None, name=None) self.assertEqual( {"data": {}}, data.build(), ) def test_data_frame_index_with_df_and_none(self) -> None: """ A method for testing Data.add_data_frame_index method. It tests with (dataframe, None). Raises: AssertionError: If the built value is not correct. """ data = Data() data_frame = pd.DataFrame( pd.Series({"x": 1, "y": 2, "z": 3}, index=["x", "y"], name="series") ) data.add_data_frame_index(data_frame=data_frame, name=None) data.add_data_frame(data_frame) self.assertEqual( { "data": { "series": [ {"name": "None", "type": "dimension", "values": ["x", "y"]}, {"name": "series", "type": "measure", "values": [1.0, 2.0]}, ] } }, data.build(), ) def test_data_frame_index_with_df_and_index(self) -> None: """ A method for testing Data.add_data_frame_index method. It tests with (dataframe, index). Raises: AssertionError: If the built value is not correct. 
""" data = Data() data_frame = pd.DataFrame({"series": [1, 2, 3]}, index=["x", "y", "z"]) data.add_data_frame_index(data_frame=data_frame, name="Index") data.add_data_frame(data_frame) self.assertEqual( { "data": { "series": [ { "name": "Index", "type": "dimension", "values": ["x", "y", "z"], }, { "name": "series", "type": "measure", "values": [1.0, 2.0, 3.0], }, ] } }, data.build(), ) def test_data_frame_index_with_pd_series(self) -> None: """ A method for testing Data.add_data_frame_index method. It tests with (pd.Series, index). Raises: AssertionError: If the built value is not correct. """ data = Data() data_frame = pd.Series( {"x": 1, "y": 2, "z": 3}, index=["x", "y"], name="series" ) data.add_data_frame_index(data_frame=data_frame, name="Index") data.add_data_frame(data_frame) self.assertEqual( { "data": { "series": [ {"name": "Index", "type": "dimension", "values": ["x", "y"]}, {"name": "series", "type": "measure", "values": [1.0, 2.0]}, ] } }, data.build(), ) class TestConfig(unittest.TestCase): """A class for testing Config class.""" def test_config(self) -> None: """ A method for testing Config.build method. Raises: AssertionError: If the built value is not correct. """ animation = Config({"color": {"set": ["Genres"]}}) self.assertEqual({"config": {"color": {"set": ["Genres"]}}}, animation.build()) def test_config_preset(self) -> None: """ A method for testing Config.__getattr__ method. Raises: AssertionError: If the dumped value is not correct. """ animation = Config.column({"x": "foo", "y": "bar"}) # instead of build() test with dump() because contains raw js self.assertEqual( "{\"config\": lib.presets.column({'x': 'foo', 'y': 'bar'})}", animation.dump(), ) class TestStyle(unittest.TestCase): """A class for testing Style class.""" def test_style(self) -> None: """ A method for testing Style.build method with dictionary. Raises: AssertionError: If the built value is not correct. """ animation = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.assertEqual( {"style": {"title": {"backgroundColor": "#A0A0A0"}}}, animation.build() ) def test_style_can_be_none(self) -> None: """ A method for testing Style.build method with None. Raises: AssertionError: If the built value is not correct. """ animation = Style(None) self.assertEqual({"style": None}, animation.build()) class TestSnapshot(unittest.TestCase): """A class for testing Snapshot class.""" def test_snapshot(self) -> None: """ A method for testing Snapshot.dump method. Raises: AssertionError: If the dumped value is not correct. """ animation = Snapshot("abc1234") self.assertEqual("'abc1234'", animation.dump()) def test_snapshot_can_not_be_built(self) -> None: """ A method for testing Snapshot.build method. It raises an error if has been called. Raises: AssertionError: f NotImplementedError is not occurred. """ animation = Snapshot("abc1234") self.assertRaises(NotImplementedError, animation.build) class TestMerger(unittest.TestCase): """A class for testing AnimationMerger class.""" def setUp(self) -> None: self.merger = AnimationMerger() self.data = Data() self.data.add_record(["Rock", "Hard", 96]) self.config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) def test_merge(self) -> None: """ A method for testing AnimationMerger.merge method with animations. Raises: AssertionError: If the dumped value is not correct. 
""" self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) self.assertEqual( json.dumps( { "data": {"records": [["Rock", "Hard", 96]]}, "config": {"channels": {"label": {"attach": ["Popularity"]}}}, "style": {"title": {"backgroundColor": "#A0A0A0"}}, } ), self.merger.dump(), ) def test_merge_none(self) -> None: """ A method for testing AnimationMerger.merge method with animation that contains None. Raises: AssertionError: If the dumped value is not correct. """ self.merger.merge(self.config) self.merger.merge(Style(None)) self.assertEqual( '{"config": {"channels": {"label": {"attach": ["Popularity"]}}}, "style": null}', self.merger.dump(), ) def test_snapshot_can_not_be_merged(self) -> None: """ A method for testing AnimationMerger.merge method with Snapshot. It raises an error if has been called. Raises: AssertionError: If NotImplementedError is not occurred. """ self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) self.assertRaises(NotImplementedError, self.merger.merge, Snapshot("abc1234")) def test_only_different_animations_can_be_merged(self) -> None: """ A method for testing AnimationMerger.merge method with same types of animations. It raises an error if has been called. Raises: AssertionError: If ValueError is not occurred. """ self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) data = Data() data.add_record(["Pop", "Hard", 114]) self.assertRaises(ValueError, self.merger.merge, data) self.assertRaises(ValueError, self.merger.merge, Config({"title": "Test"})) self.assertRaises(ValueError, self.merger.merge, Style(None)) tests/normalizer.py METASEP """A module for postprocessing mocked test outputs.""" import re from unittest.mock import MagicMock class Normalizer: """A class for normalizing mocked test outputs.""" def __init__(self): """ Normalizer constructor. It compiles regex expressions. """ self.id1_pattern = re.compile(r"'[a-f0-9]{7}'", flags=re.MULTILINE) self.id2_pattern = re.compile(r"\"[a-f0-9]{7}\"", flags=re.MULTILINE) def normalize_id(self, output: str) -> str: """ A method for replacing uuids with the `id` strings. Args: output: The original output. Returns: The normalized output. """ normalized_output = output normalized_output = self.id1_pattern.sub("id", normalized_output) normalized_output = self.id2_pattern.sub("id", normalized_output) return normalized_output def normalize_output(self, output: MagicMock, start_index: int = 0) -> str: """ A method for merging and normalizing mocked test outputs. Args: output: The original output object. start_index: The start index of merging. Returns: The merged and normalized output. """ output_items = [] for block in output.call_args_list[start_index:]: output_items.append(block.args[0]) return self.normalize_id("\n".join(output_items)).strip() tools/html-generator/preprocessor.py METASEP """A module for preprocessing notebook files.""" import re from nbconvert.preprocessors import Preprocessor # type: ignore class NbPreprocessor(Preprocessor): """ A class for preprocessing notebook cells before converting them to another format. """ def preprocess_cell(self, cell, resources, index): """ Overrides Preprocessor.preprocess_cell method. In markdown cells, it replaces the alignment format and ipynb links with html links. In code cells, it sets IpyVizzu.nbconvert value to true. 
""" if "source" in cell and cell.cell_type == "markdown": cell.source = re.sub( r"\[([^]]*)\]\(([^)]*)\.ipynb([^]]*)?\)", r"[\1](\2.html\3)", cell.source, ) cell.source = re.sub( r"\<p align\=\"center\"", '<p style="text-align: center"', cell.source, ) if "outputs" in cell and cell.cell_type == "code": for i, output in enumerate(cell.outputs): if "data" in output and "application/javascript" in output["data"]: cell.outputs[i]["data"]["application/javascript"] = re.sub( r"(IpyVizzu.nbconvert = )(false)(;)", r"\1true\3", output["data"]["application/javascript"], ) return cell, resources setup.py METASEP """ ipyvizzu Build animated charts in Jupyter Notebook and in many other environments with a simple Python syntax. """ from setuptools import setup # type: ignore with open("requirements.txt", encoding="utf8") as fp: requirements = fp.read().splitlines() with open("README.md", encoding="utf8") as fp: long_description = fp.read() setup( name="ipyvizzu", version="0.13.0", description="Build animated charts in many environments with a simple Python syntax.", long_description=long_description, long_description_content_type="text/markdown", license="Apache 2", packages=["ipyvizzu"], package_dir={"ipyvizzu": "src/ipyvizzu"}, package_data={"ipyvizzu": ["py.typed", "templates/*.js"]}, python_requires=">=3.6", install_requires=requirements, url="https://github.com/vizzuhq/ipyvizzu", project_urls={ "Documentation": "https://ipyvizzu.vizzuhq.com", "Source": "https://github.com/vizzuhq/ipyvizzu", "Tracker": "https://github.com/vizzuhq/ipyvizzu/issues", }, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Environment :: Console", ], ) tools/mkdocs/gen_files.py METASEP
"""Generate the code reference pages and navigation."""

# pylint: disable=too-few-public-methods

from pathlib import Path
from typing import Union, List
import re

import yaml
import mkdocs_gen_files  # type: ignore


class MkdocsConfig:
    """A class for working with mkdocs configuration."""

    @staticmethod
    def load() -> dict:
        """
        A method for loading mkdocs configuration.

        Returns:
            A dictionary that contains the mkdocs configuration.
        """

        with open(Path(__file__).parent / "mkdocs.yml", "rt", encoding="utf8") as f_yml:
            return yaml.load(f_yml, Loader=yaml.FullLoader)


class Index:
    """A class for creating index file from README."""

    @staticmethod
    def generate(readme: Path, site: str, ipynbs: List[str]) -> None:
        """
        A method for generating the index file.

        Args:
            readme: README.md path.
            site: Site url.
            ipynbs: List of html links that are ipynb files.
        """

        with open(readme, "rt", encoding="utf8") as f_readme:
            content = f_readme.read()

        for match in re.finditer(
            rf"\[([^]]*)\]\(({site}/)([^]]*)(.html)([^]]*)?\)",
            content,
        ):
            if match[0] in ipynbs:
                content = content.replace(
                    match[0], f"[{match[1]}]({match[3]}.ipynb{match[5]})"
                )
            else:
                content = content.replace(
                    match[0], f"[{match[1]}]({match[3]}.md{match[5]})"
                )

        content = content.replace(f"{site}/", "")

        with mkdocs_gen_files.open("index.md", "w") as f_index:
            f_index.write(content)


class SectionIndex:
    """A class for creating section index files."""

    @staticmethod
    def _write_index_file(file: str, toc: list) -> None:
        """
        A method for writing table of contents into a section index file.

        Args:
            file: The section index file.
            toc: Items of the table of contents.
        """

        for item in toc:
            if isinstance(item, str):
                SectionIndex._write_str_index(file, item)
            elif isinstance(item, dict):
                SectionIndex._write_dict_index(file, item)
            else:
                raise NotImplementedError(f"{item}")

    @staticmethod
    def _write_str_index(file: str, item: str) -> None:
        """
        A method for writing an str toc item into a section index file.

        Args:
            file: The section index file.
            item: Item of the table of contents.
        """

        with mkdocs_gen_files.open(file, "a") as f_index:
            parts = item.split("/")
            part = parts[-1].replace(".md", "").capitalize()
            link = Path(item).relative_to(Path(file).parent)
            f_index.write(f"* [{part}]({link})\n")

    @staticmethod
    def _write_dict_index(file: str, item: dict) -> None:
        """
        A method for writing a dict toc item into a section index file.

        Args:
            file: The section index file.
            item: Item of the table of contents.
        """

        with mkdocs_gen_files.open(file, "a") as f_index:
            for key in item:
                if isinstance(item[key], str):
                    link = Path(item[key]).relative_to(Path(file).parent)
                    f_index.write(f"* [{key}]({link})\n")
                    continue
                if item[key] and isinstance(item[key], list):
                    if isinstance(item[key][0], str):
                        if item[key][0].endswith("index.md"):
                            link = Path(item[key][0]).relative_to(Path(file).parent)
                            f_index.write(f"* [{key}]({link})\n")
                            continue
                raise NotImplementedError(f"{item}")

    @staticmethod
    def generate(nav_item: Union[list, dict, str]) -> None:
        """
        A method for creating section indices for the navigation.

        Args:
            nav_item: Part of the navigation.
        """

        if isinstance(nav_item, list):
            if (
                nav_item
                and isinstance(nav_item[0], str)
                and nav_item[0].endswith("index.md")
            ):
                SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])
            for item in nav_item:
                SectionIndex.generate(nav_item=item)
        elif isinstance(nav_item, dict):
            for key in nav_item:
                SectionIndex.generate(nav_item=nav_item[key])


class Reference:
    """A class for creating code reference."""

    @staticmethod
    def generate(folder: str) -> None:
        """
        A method for generating the code reference.

        Args:
            folder: Reference destination folder.
        """

        for path in sorted(Path("src").rglob("*.py")):
            module_path = path.relative_to("src").with_suffix("")

            doc_path = path.relative_to("src").with_suffix(".md")
            full_doc_path = Path(folder, doc_path)

            parts = tuple(module_path.parts)

            if parts[-1] == "__init__":
                parts = parts[:-1]
                doc_path = doc_path.with_name("index.md")
                full_doc_path = full_doc_path.with_name("index.md")
            elif parts[-1] == "__main__":
                continue

            with mkdocs_gen_files.open(full_doc_path, "w") as f_md:
                item = ".".join(parts)
                f_md.write(f"::: {item}")


def main() -> None:
    """
    The main method.
    It prepares files for the documentation site.
    """

    config = MkdocsConfig.load()

    site_url = config["site_url"]
    if site_url.endswith("/"):
        site_url = site_url[:-1]

    index_ipynbs: List[str] = []

    SectionIndex.generate(nav_item=config["nav"])
README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:\n SectionIndex.generate(nav_item=nav_item[key])\n\n\nclass Reference:\n \"\"\"A class for creating code reference.\"\"\"\n\n @staticmethod\n def generate(folder: str) -> None:\n \"\"\"\n A method for generate code reference.\n\n Args:\n folder: Reference destination folder.\n \"\"\"\n\n for path in sorted(Path(\"src\").rglob(\"*.py\")):\n module_path = path.relative_to(\"src\").with_suffix(\"\")\n\n doc_path = path.relative_to(\"src\").with_suffix(\".md\")\n 
full_doc_path = Path(folder, doc_path)\n\n parts = tuple(module_path.parts)\n\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n doc_path = doc_path.with_name(\"index.md\")\n full_doc_path = full_doc_path.with_name(\"index.md\")\n elif parts[-1] == \"__main__\":\n continue\n\n with mkdocs_gen_files.open(full_doc_path, \"w\") as f_md:\n item = \".\".join(parts)\n f_md.write(f\"::: {item}\")\n\n\ndef main() -> None:\n \"\"\"\n The main method.\n It prepares files for the documentation site.\n \"\"\"\n\n config = MkdocsConfig.load()\n\n site_url = config[\"site_url\"]\n if site_url.endswith(\"/\"):\n site_url = site_url[:-1]\n\n index_ipynbs: List[str] = []\n\n SectionIndex.generate(nav_item=config[\"nav\"])\n\n Reference.generate(\"reference\")\n", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n 
with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The 
section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, 
Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() 
-> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):", "type": "infile" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:", "type": "commited" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = 
content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:\n SectionIndex.generate(nav_item=nav_item[key])\n\n\nclass Reference:\n \"\"\"A class for creating code reference.\"\"\"\n\n @staticmethod\n def generate(folder: str) -> None:\n \"\"\"\n A method for generate code reference.\n\n Args:\n folder: Reference destination folder.\n \"\"\"\n\n for path in sorted(Path(\"src\").rglob(\"*.py\")):\n module_path = path.relative_to(\"src\").with_suffix(\"\")\n\n doc_path = path.relative_to(\"src\").with_suffix(\".md\")\n full_doc_path = Path(folder, doc_path)\n\n parts = tuple(module_path.parts)\n\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n doc_path = doc_path.with_name(\"index.md\")\n full_doc_path = full_doc_path.with_name(\"index.md\")\n elif parts[-1] == \"__main__\":\n continue\n\n with mkdocs_gen_files.open(full_doc_path, \"w\") as f_md:\n item = \".\".join(parts)\n f_md.write(f\"::: {item}\")\n\n\ndef main() -> None:\n \"\"\"\n The main method.\n It prepares files for the documentation site.\n \"\"\"\n", "type": "commited" }, { "content": "\"\"\"Generate the code reference pages and 
navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n 
SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:\n SectionIndex.generate(nav_item=nav_item[key])\n\n\nclass Reference:\n \"\"\"A class for creating code reference.\"\"\"\n\n @staticmethod\n def generate(folder: str) -> None:\n \"\"\"\n A method for generate code reference.\n\n Args:\n folder: Reference destination folder.\n \"\"\"\n\n for path in sorted(Path(\"src\").rglob(\"*.py\")):\n module_path = path.relative_to(\"src\").with_suffix(\"\")\n\n doc_path = path.relative_to(\"src\").with_suffix(\".md\")\n full_doc_path = Path(folder, doc_path)\n\n parts = tuple(module_path.parts)\n\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n doc_path = doc_path.with_name(\"index.md\")\n full_doc_path = full_doc_path.with_name(\"index.md\")\n elif parts[-1] == \"__main__\":\n continue\n\n with mkdocs_gen_files.open(full_doc_path, \"w\") as f_md:\n item = \".\".join(parts)\n f_md.write(f\"::: {item}\")\n\n\ndef main() -> None:\n \"\"\"\n The main method.\n It prepares files for the documentation site.\n \"\"\"\n\n config = MkdocsConfig.load()\n\n site_url = config[\"site_url\"]\n if site_url.endswith(\"/\"):\n site_url = site_url[:-1]\n\n index_ipynbs: List[str] = []\n\n SectionIndex.generate(nav_item=config[\"nav\"])\n\n Reference.generate(\"reference\")\n\n Index.generate(\n readme=Path(__file__).parent / \"..\" / \"..\" / \"README.md\",\n site=site_url,\n ipynbs=index_ipynbs,\n )\n\n", "type": "commited" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"", "type": "non_informative" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating 
section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:\n SectionIndex.generate(nav_item=nav_item[key])\n\n\nclass Reference:\n \"\"\"A class for creating code reference.\"\"\"\n\n @staticmethod\n def generate(folder: str) -> None:\n \"\"\"\n A method for generate code reference.\n\n Args:\n folder: Reference destination folder.\n \"\"\"\n\n for path in sorted(Path(\"src\").rglob(\"*.py\")):\n module_path = path.relative_to(\"src\").with_suffix(\"\")\n\n doc_path = path.relative_to(\"src\").with_suffix(\".md\")\n full_doc_path = Path(folder, doc_path)\n\n parts = tuple(module_path.parts)\n\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n doc_path = doc_path.with_name(\"index.md\")\n full_doc_path = full_doc_path.with_name(\"index.md\")\n elif parts[-1] == \"__main__\":\n continue\n\n with mkdocs_gen_files.open(full_doc_path, \"w\") as f_md:\n item = \".\".join(parts)\n f_md.write(f\"::: {item}\")\n\n\ndef main() -> None:\n \"\"\"\n The main method.", "type": "non_informative" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method 
for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:", "type": "non_informative" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"", "type": "non_informative" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod", "type": "non_informative" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], 
f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def generate(nav_item: Union[list, dict, str]) -> None:\n \"\"\"\n A method for creating section indices for the navigation.\n\n Args:\n nav_item: Part of the navigation.\n \"\"\"\n\n if isinstance(nav_item, list):\n if (\n nav_item\n and isinstance(nav_item[0], str)\n and nav_item[0].endswith(\"index.md\")\n ):\n SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])\n for item in nav_item:\n SectionIndex.generate(nav_item=item)\n elif isinstance(nav_item, dict):\n for key in nav_item:\n SectionIndex.generate(nav_item=nav_item[key])\n\n\nclass Reference:\n \"\"\"A class for creating code reference.\"\"\"\n\n @staticmethod\n def generate(folder: str) -> None:\n \"\"\"\n A method for generate code reference.\n\n Args:\n folder: Reference destination folder.\n \"\"\"\n\n for path in sorted(Path(\"src\").rglob(\"*.py\")):\n module_path = path.relative_to(\"src\").with_suffix(\"\")\n\n doc_path = path.relative_to(\"src\").with_suffix(\".md\")\n full_doc_path = Path(folder, doc_path)\n\n parts = tuple(module_path.parts)\n\n if parts[-1] == \"__init__\":\n parts = parts[:-1]\n doc_path = doc_path.with_name(\"index.md\")\n full_doc_path = full_doc_path.with_name(\"index.md\")\n elif parts[-1] == \"__main__\":\n continue\n\n with mkdocs_gen_files.open(full_doc_path, \"w\") as f_md:\n item = \".\".join(parts)\n f_md.write(f\"::: {item}\")\n\n\ndef main() -> None:\n \"\"\"\n The main method.\n It prepares files for the documentation site.\n \"\"\"\n\n config = MkdocsConfig.load()\n\n site_url = config[\"site_url\"]\n if site_url.endswith(\"/\"):\n site_url = 
site_url[:-1]\n\n index_ipynbs: List[str] = []\n\n SectionIndex.generate(nav_item=config[\"nav\"])\n\n Reference.generate(\"reference\")\n\n Index.generate(\n readme=Path(__file__).parent / \"..\" / \"..\" / \"README.md\",\n site=site_url,", "type": "random" }, { "content": "\"\"\"Generate the code reference pages and navigation.\"\"\"\n\n# pylint: disable=too-few-public-methods\n\nfrom pathlib import Path\nfrom typing import Union, List\nimport re\n\nimport yaml\nimport mkdocs_gen_files # type: ignore\n\n\nclass MkdocsConfig:\n \"\"\"A class for working with mkdocs configuration.\"\"\"\n\n @staticmethod\n def load() -> dict:\n \"\"\"\n A method for loading mkdocs configuration.\n\n Returns:\n A dictionary that contains the mkdocs configuration.\n \"\"\"\n\n with open(Path(__file__).parent / \"mkdocs.yml\", \"rt\", encoding=\"utf8\") as f_yml:\n return yaml.load(f_yml, Loader=yaml.FullLoader)\n\n\nclass Index:\n \"\"\"A class for creating index file from README.\"\"\"\n\n @staticmethod\n def generate(readme: Path, site: str, ipynbs: List[str]) -> None:\n \"\"\"\n A method for generating the index file.\n\n Args:\n readme: README.md path.\n site: Site url.\n ipynbs: List of html links that are ipynb files.\n \"\"\"\n\n with open(readme, \"rt\", encoding=\"utf8\") as f_readme:\n content = f_readme.read()\n\n for match in re.finditer(\n rf\"\\[([^]]*)\\]\\(({site}/)([^]]*)(.html)([^]]*)?\\)\",\n content,\n ):\n if match[0] in ipynbs:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.ipynb{match[5]})\"\n )\n else:\n content = content.replace(\n match[0], f\"[{match[1]}]({match[3]}.md{match[5]})\"\n )\n\n content = content.replace(f\"{site}/\", \"\")\n\n with mkdocs_gen_files.open(\"index.md\", \"w\") as f_index:\n f_index.write(content)\n\n\nclass SectionIndex:\n \"\"\"A class for creating section index files.\"\"\"\n\n @staticmethod\n def _write_index_file(file: str, toc: list) -> None:\n \"\"\"\n A method for writing table of contents into a section index file.\n\n Args:\n file: The section index file.\n toc: Items of the table of contents.\n \"\"\"\n\n for item in toc:\n if isinstance(item, str):\n SectionIndex._write_str_index(file, item)\n elif isinstance(item, dict):\n SectionIndex._write_dict_index(file, item)\n else:\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def _write_str_index(file: str, item: str) -> None:\n \"\"\"\n A method for writing an str toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n parts = item.split(\"/\")\n part = parts[-1].replace(\".md\", \"\").capitalize()\n link = Path(item).relative_to(Path(file).parent)\n f_index.write(f\"* [{part}]({link})\\n\")\n\n @staticmethod\n def _write_dict_index(file: str, item: dict) -> None:\n \"\"\"\n A method for writing a dict toc item into a section index file.\n\n Args:\n file: The section index file.\n item: Item of the table of contents.\n \"\"\"\n\n with mkdocs_gen_files.open(file, \"a\") as f_index:\n for key in item:\n if isinstance(item[key], str):\n link = Path(item[key]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n if item[key] and isinstance(item[key], list):\n if isinstance(item[key][0], str):\n if item[key][0].endswith(\"index.md\"):\n link = Path(item[key][0]).relative_to(Path(file).parent)\n f_index.write(f\"* [{key}]({link})\\n\")\n continue\n raise NotImplementedError(f\"{item}\")\n\n @staticmethod\n def 
[ " SectionIndex.generate(nav_item=config[\"nav\"])", " Reference.generate(\"reference\")", " Index.generate(", " SectionIndex._write_index_file(file=nav_item[0], toc=nav_item[1:])", " SectionIndex.generate(nav_item=item)", " SectionIndex._write_str_index(file, item)", " SectionIndex.generate(nav_item=nav_item[key])", " SectionIndex._write_dict_index(file, item)", " return yaml.load(f_yml, Loader=yaml.FullLoader)", " config = MkdocsConfig.load()", "main()", " A method for loading mkdocs configuration.", " It prepares files for the documentation site.", " \"\"\"", "", " def _write_index_file(file: str, toc: list) -> None:", " ipynbs=index_ipynbs,", " if parts[-1] == \"__init__\":", " f_index.write(f\"* [{part}]({link})\\n\")", " for key in nav_item:", " for match in re.finditer(" ]
METASEP
35
vizzuhq__ipyvizzu
vizzuhq__ipyvizzu METASEP src/ipyvizzu/data/converters/spark/converter.py METASEP """ This module provides the `SparkDataFrameConverter` class, which allows converting a `pyspark` `DataFrame` into a list of dictionaries representing series. """ from types import ModuleType from typing import List, Tuple from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE from ipyvizzu.data.converters.df.defaults import MAX_ROWS from ipyvizzu.data.converters.df.converter import DataFrameConverter from ipyvizzu.data.infer_type import InferType from ipyvizzu.data.type_alias import ( DimensionValue, MeasureValue, SeriesValues, ) class SparkDataFrameConverter(DataFrameConverter): """ Converts a `pyspark` `DataFrame` into a list of dictionaries representing series. Each dictionary contains information about the series `name`, `values` and `type`. Parameters: df: The `pyspark` `DataFrame` to convert. default_measure_value: Default value to use for missing measure values. Defaults to 0. default_dimension_value: Default value to use for missing dimension values. Defaults to an empty string. max_rows: The maximum number of rows to include in the converted series list. If the `df` contains more rows, a random sample of the given number of rows will be taken. Example: Get series list from `DataFrame` columns: converter = SparkDataFrameConverter(df) series_list = converter.get_series_list() """ # pylint: disable=too-few-public-methods def __init__( self, df: "pyspark.sql.DataFrame", # type: ignore default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, max_rows: int = MAX_ROWS, ) -> None: super().__init__(default_measure_value, default_dimension_value, max_rows) self._pyspark = self._get_pyspark() self._df = self._preprocess_df(df) def _get_pyspark(self) -> ModuleType: try: import pyspark # pylint: disable=import-outside-toplevel return pyspark except ImportError as error: raise ImportError( "pyspark is not available. Please install pyspark to use this feature." 
) from error @staticmethod def _get_sampled_df( df: "pyspark.sql.DataFrame", fraction: float # type: ignore ) -> "pyspark.sql.DataFrame": # type: ignore return df.sample(withReplacement=False, fraction=fraction, seed=42) @staticmethod def _get_row_number(df: "pyspark.sql.DataFrame") -> int: # type: ignore return df.count() def _get_columns(self) -> List[str]: return self._df.columns def _convert_to_series_values_and_type( self, obj: str ) -> Tuple[SeriesValues, InferType]: column_name = obj column = self._df.select(column_name) integer_type = self._pyspark.sql.types.IntegerType double_type = self._pyspark.sql.types.DoubleType if isinstance(column.schema[column_name].dataType, (integer_type, double_type)): return self._convert_to_measure_values(column_name), InferType.MEASURE return self._convert_to_dimension_values(column_name), InferType.DIMENSION def _convert_to_measure_values(self, obj: str) -> List[MeasureValue]: column_name = obj when = self._pyspark.sql.functions.when col = self._pyspark.sql.functions.col df = self._df.withColumn( column_name, when(col(column_name).isNull(), self._default_measure_value).otherwise( col(column_name) ), ) df = df.withColumn(column_name, col(column_name).cast("float")) return df.select(column_name).rdd.flatMap(lambda x: x).collect() def _convert_to_dimension_values(self, obj: str) -> List[DimensionValue]: column_name = obj when = self._pyspark.sql.functions.when col = self._pyspark.sql.functions.col df = self._df.withColumn( column_name, when(col(column_name).isNull(), self._default_dimension_value).otherwise( col(column_name) ), ) df = df.withColumn(column_name, col(column_name).cast("string")) return df.select(column_name).rdd.flatMap(lambda x: x).collect() src/ipyvizzu/data/converters/spark/__init__.py METASEP """ This module provides modules for pyspark converter. """ src/ipyvizzu/data/converters/pandas/converter.py METASEP """ This module provides the `PandasDataFrameConverter` class, which allows converting a `pandas` `DataFrame` or `Series` into a list of dictionaries representing series. """ from types import ModuleType from typing import List, Optional, Tuple, Union from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE from ipyvizzu.data.converters.df.defaults import MAX_ROWS from ipyvizzu.data.converters.df.converter import DataFrameConverter from ipyvizzu.data.infer_type import InferType from ipyvizzu.data.type_alias import ( DimensionValue, MeasureValue, Series, SeriesValues, ) class PandasDataFrameConverter(DataFrameConverter): """ Converts a `pandas` `DataFrame` or `Series` into a list of dictionaries representing series. Each dictionary contains information about the series `name`, `values` and `type`. Parameters: df: The `pandas` `DataFrame` or `Series` to convert. default_measure_value: Default value to use for missing measure values. Defaults to 0. default_dimension_value: Default value to use for missing dimension values. Defaults to an empty string. max_rows: The maximum number of rows to include in the converted series list. If the `df` contains more rows, a random sample of the given number of rows will be taken. include_index: Name for the index column to include as a series. If provided, the index column will be added. Defaults to None. 
    Example:
        Get series list from `DataFrame` columns:

            converter = PandasDataFrameConverter(df)
            series_list = converter.get_series_list()
    """

    def __init__(
        self,
        df: Union["pandas.DataFrame", "pandas.Series"],  # type: ignore
        default_measure_value: MeasureValue = NAN_MEASURE,
        default_dimension_value: DimensionValue = NAN_DIMENSION,
        max_rows: int = MAX_ROWS,
        include_index: Optional[str] = None,
    ) -> None:
        # pylint: disable=too-many-arguments

        super().__init__(default_measure_value, default_dimension_value, max_rows)
        self._pd = self._get_pandas()
        self._df = self._preprocess_df(
            self._pd.DataFrame(df) if isinstance(df, self._pd.Series) else df
        )
        self._include_index = include_index

    def get_series_list(self) -> List[Series]:
        """
        Convert the `DataFrame` columns to a list of dictionaries representing series.

        Returns:
            A list of dictionaries representing series,
            where each dictionary has `name`, `values` and `type` keys.
        """

        series_list = super().get_series_list()
        index_series = self.get_series_from_index()
        return index_series + series_list

    def get_series_from_index(self) -> List[Series]:
        """
        Convert the `DataFrame` index to a list containing a series,
        if `include_index` is provided.

        Returns:
            A list with one dictionary representing the index series,
            where the dictionary has `name`, `values` and `type` keys.
            Returns an empty list if `include_index` is not provided.
        """

        if not self._include_index or self._df.index.empty:
            return []
        df = self._pd.DataFrame({self._include_index: self._df.index})
        index_series_converter = PandasDataFrameConverter(
            df, self._default_measure_value, self._default_dimension_value
        )
        return index_series_converter.get_series_list()

    def _get_pandas(self) -> ModuleType:
        try:
            import pandas as pd  # pylint: disable=import-outside-toplevel

            return pd
        except ImportError as error:
            raise ImportError(
                "pandas is not available. Please install pandas to use this feature."
            ) from error

    @staticmethod
    def _get_sampled_df(
        df: "pandas.DataFrame", fraction: float  # type: ignore
    ) -> "pandas.DataFrame":  # type: ignore
        return df.sample(frac=fraction, replace=False, random_state=42)

    @staticmethod
    def _get_row_number(df: "pandas.DataFrame") -> int:  # type: ignore
        return len(df)

    def _get_columns(self) -> List[str]:
        return self._df.columns

    def _convert_to_series_values_and_type(
        self, obj: str  # type: ignore
    ) -> Tuple[SeriesValues, InferType]:
        column_name = obj
        column = self._df[column_name]
        if self._pd.api.types.is_numeric_dtype(column.dtype):
            return self._convert_to_measure_values(column), InferType.MEASURE
        return self._convert_to_dimension_values(column), InferType.DIMENSION

    def _convert_to_measure_values(
        self, obj: "pandas.DataFrame"  # type: ignore
    ) -> List[MeasureValue]:
        column = obj
        return column.fillna(self._default_measure_value).astype(float).values.tolist()

    def _convert_to_dimension_values(
        self, obj: "pandas.DataFrame"  # type: ignore
    ) -> List[DimensionValue]:
        column = obj
        return column.fillna(self._default_dimension_value).astype(str).values.tolist()

src/ipyvizzu/data/converters/pandas/__init__.py METASEP

"""
This module provides modules for pandas converter.
"""

src/ipyvizzu/data/converters/numpy/type_alias.py METASEP

"""
This module provides typing aliases for numpy converter.
"""

from typing import Dict, TypeVar, Union


Index = int
"""Represents the index of a column."""

Name = str
"""Represents the name of a column."""

DType = type
"""Represents the dtype of a column."""

ColumnName = Union[Name, Dict[Index, Name]]
"""
Represents a column name.
It is a dictionary of Index:Name pairs or for single-dimensional arrays, it can be just a Name. """ ColumnDtype = Union[DType, Dict[Index, DType]] """ Represents a column dtype. It is a dictionary of Index:DType pairs or for single-dimensional arrays, it can be just a DType. """ ColumnConfig = TypeVar("ColumnConfig", Name, DType) """ Represents a column config. It can be Name or DType. """ src/ipyvizzu/data/converters/numpy/converter.py METASEP """ This module provides the `NumpyArrayConverter` class, which allows converting a `numpy` `array` into a list of dictionaries representing series. """ from types import ModuleType from typing import Dict, List, Optional, Tuple, Union from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE from ipyvizzu.data.converters.converter import ToSeriesListConverter from ipyvizzu.data.converters.numpy.type_alias import ( ColumnConfig, ColumnDtype, ColumnName, DType, Index, Name, ) from ipyvizzu.data.infer_type import InferType from ipyvizzu.data.type_alias import ( DimensionValue, MeasureValue, Series, SeriesValues, ) class NumpyArrayConverter(ToSeriesListConverter): """ Converts a `numpy` `array` into a list of dictionaries representing series. Each dictionary contains information about the series `name`, `values` and `type`. Parameters: np_array: The `numpy` `array` to convert. column_name: The name of a column. By default, uses column indices. Can be set with an Index:Name pair or, for single-dimensional arrays, with just the Name. column_dtype: The dtype of a column. By default, uses the np_array's dtype. Can be set with an Index:DType pair or, for single-dimensional arrays, with just the DType. default_measure_value: Default value to use for missing measure values. Defaults to 0. default_dimension_value: Default value to use for missing dimension values. Defaults to an empty string. Example: Get series list from `numpy` `array`: converter = NumpyArrayConverter(np_array) series_list = converter.get_series_list() """ # pylint: disable=too-few-public-methods def __init__( self, np_array: "numpy.array", # type: ignore column_name: Optional[ColumnName] = None, column_dtype: Optional[ColumnDtype] = None, default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, ) -> None: # pylint: disable=too-many-arguments super().__init__(default_measure_value, default_dimension_value) self._np = self._get_numpy() self._np_array = np_array self._column_name: Dict[Index, Name] = self._get_columns_config(column_name) self._column_dtype: Dict[Index, DType] = self._get_columns_config(column_dtype) def get_series_list(self) -> List[Series]: """ Convert the `numpy` `array` to a list of dictionaries representing series. Returns: A list of dictionaries representing series, where each dictionary has `name`, `values` and `type` keys. 
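
        Raises:
            ValueError: If the wrapped `numpy` `array` has more than
                two dimensions; only 0, 1 and 2 dimensional arrays
                are handled.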
""" if self._np_array.ndim == 0: return [] if self._np_array.ndim == 1: return self._get_series_list_from_array1dim() if self._np_array.ndim == 2: return self._get_series_list_from_array2dim() raise ValueError("arrays larger than 2D are not supported") def _get_series_list_from_array1dim(self) -> List[Series]: i = 0 name = self._column_name.get(i, i) values, infer_type = self._convert_to_series_values_and_type( (i, self._np_array) ) return [self._convert_to_series(name, values, infer_type)] def _get_series_list_from_array2dim(self) -> List[Series]: series_list = [] for i in range(self._np_array.shape[1]): name = self._column_name.get(i, i) values, infer_type = self._convert_to_series_values_and_type( (i, self._np_array[:, i]) ) series_list.append(self._convert_to_series(name, values, infer_type)) return series_list def _get_numpy(self) -> ModuleType: try: import numpy as np # pylint: disable=import-outside-toplevel return np except ImportError as error: raise ImportError( "numpy is not available. Please install numpy to use this feature." ) from error def _get_columns_config( self, config: Optional[Union[ColumnConfig, Dict[Index, ColumnConfig]]], ) -> Dict[Index, ColumnConfig]: if config is None: return {} if not isinstance(config, dict): if not self._np_array.ndim == 1: raise ValueError("non dict value can only be used for a 1D array") return {0: config} return config def _convert_to_series_values_and_type( self, obj: Tuple[int, "numpy.array"] # type: ignore ) -> Tuple[SeriesValues, InferType]: column = obj i = column[0] array = column[1] dtype = self._column_dtype.get(i, self._np_array.dtype) if self._np.issubdtype(dtype, self._np.number): return self._convert_to_measure_values(array), InferType.MEASURE return self._convert_to_dimension_values(array), InferType.DIMENSION def _convert_to_measure_values( self, obj: "numpy.array" # type: ignore ) -> List[MeasureValue]: array = obj array_float = array.astype(float) return self._np.nan_to_num( array_float, nan=self._default_measure_value ).tolist() def _convert_to_dimension_values( self, obj: "numpy.array" # type: ignore ) -> List[DimensionValue]: array = obj array_str = array.astype(str) replace_nan = "nan" mask = array_str == replace_nan array_str[mask] = self._default_dimension_value return array_str.tolist() src/ipyvizzu/data/converters/numpy/__init__.py METASEP """ This module provides modules for numpy converter. """ src/ipyvizzu/data/converters/df/type_alias.py METASEP """ This module provides typing aliases for data frame converter. """ from typing import Any, TypeVar DataFrame = TypeVar("DataFrame", Any, Any) """ Represents a data frame. """ src/ipyvizzu/data/converters/df/defaults.py METASEP """ This module provides default values for data frame converters. """ MAX_ROWS: int = 10000 """Default maximum number of rows.""" src/ipyvizzu/data/converters/df/converter.py METASEP """ This module provides the `DataFrameConverter` abstract class. """ from abc import abstractmethod from typing import List from ipyvizzu.data.converters.converter import ToSeriesListConverter from ipyvizzu.data.converters.df.type_alias import DataFrame from ipyvizzu.data.type_alias import ( DimensionValue, MeasureValue, Series, ) class DataFrameConverter(ToSeriesListConverter): """ Converts data frame into a list of dictionaries representing series. Each dictionary contains information about the series `name`, `values` and `type`. 
""" # pylint: disable=too-few-public-methods def __init__( self, default_measure_value: MeasureValue, default_dimension_value: DimensionValue, max_rows: int, ) -> None: super().__init__(default_measure_value, default_dimension_value) self._max_rows = max_rows def get_series_list(self) -> List[Series]: """ Convert the `DataFrame` columns to a list of dictionaries representing series. Returns: A list of dictionaries representing series, where each dictionary has `name`, `values` and `type` keys. """ series_list = [] for name in self._get_columns(): series_list.append(self._get_series_from_column(name)) return series_list def _get_series_from_column(self, column_name: str) -> Series: values, infer_type = self._convert_to_series_values_and_type(column_name) return self._convert_to_series(column_name, values, infer_type) def _preprocess_df(self, df: DataFrame) -> DataFrame: rows = self._get_row_number(df) if rows > self._max_rows: return self._get_sampled_df(df, min(self._max_rows / rows, 1.0)) return df @staticmethod @abstractmethod def _get_row_number(df: DataFrame) -> int: """ Return row number of a data frame. """ @staticmethod @abstractmethod def _get_sampled_df(df: DataFrame, fraction: float) -> DataFrame: """ Return a sampled data frame by fraction. """ @abstractmethod def _get_columns(self) -> List[str]: """ Return column names of the data frame. """ src/ipyvizzu/data/converters/df/__init__.py METASEP """ This module provides modules for data frame converter. """ src/ipyvizzu/data/converters/defaults.py METASEP """ This module provides default values for converters. """ from ipyvizzu.data.type_alias import DimensionValue, MeasureValue NAN_DIMENSION: DimensionValue = "" """Default dimension value to replace nan values.""" NAN_MEASURE: MeasureValue = 0 """Default measure value to replace nan values.""" src/ipyvizzu/data/converters/converter.py METASEP """ This module provides the `ToSeriesListConverter` abstract class. """ from abc import ABC, abstractmethod from typing import Any, List, Tuple, Union from ipyvizzu.data.infer_type import InferType from ipyvizzu.data.type_alias import ( DimensionValue, MeasureValue, Series, SeriesValues, ) class ToSeriesListConverter(ABC): """ Converts data into a list of dictionaries representing series. Each dictionary contains information about the series `name`, `values` and `type`. """ # pylint: disable=too-few-public-methods def __init__( self, default_measure_value: MeasureValue, default_dimension_value: DimensionValue, ) -> None: self._default_measure_value = default_measure_value self._default_dimension_value = default_dimension_value @abstractmethod def get_series_list(self) -> List[Series]: """ Convert data to a list of dictionaries representing series. Returns: A list of dictionaries representing series, where each dictionary has `name`, `values` and `type` keys. """ @abstractmethod def _convert_to_series_values_and_type( self, obj: Any ) -> Tuple[SeriesValues, InferType]: """ Convert object to SeriesValues and InferType. """ @abstractmethod def _convert_to_measure_values(self, obj: Any) -> List[MeasureValue]: """ Convert object to a list of MeasureValue. """ @abstractmethod def _convert_to_dimension_values(self, obj: Any) -> List[DimensionValue]: """ Convert object to a list of DimensionValue. 
""" def _convert_to_series( self, name: Union[str, int], values: SeriesValues, infer_type: InferType ) -> Series: return { "name": str(name), "values": values, "type": infer_type.value, } src/ipyvizzu/data/converters/__init__.py METASEP """ This module contains converter classes that offer a user-friendly interface for data conversion, enabling users to effortlessly transform various data formats into a standardized representation of series compatible with `ipyvizzu`. """ tools/docs/style/gen_style_reference.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path import sys import mkdocs_gen_files REPO_PATH = Path(__file__).parent / ".." / ".." / ".." TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" GEN_PATH = MKDOCS_PATH / "style" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from node import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Node, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) class StyleReference: # pylint: disable=too-few-public-methods @staticmethod def generate(dst: str) -> None: content = Node.node( True, GEN_PATH / "gen_style_reference.mjs", Vizzu.get_vizzu_styleref_backend_url(), ) with mkdocs_gen_files.open(dst, "a") as f_index: f_index.write(f"\n{content}\n") def main() -> None: with chdir(REPO_PATH): StyleReference.generate(dst="tutorial/style.md") main() tools/docs/snippets/gen_vizzu.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path import sys import mkdocs_gen_files REPO_PATH = Path(__file__).parent / ".." / ".." / ".." TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) class VizzuUrl: # pylint: disable=too-few-public-methods TEMPLATE = 'const vizzu = "{vizzu}";\n\nexport default vizzu;\n' @staticmethod def generate(dst: str) -> None: with mkdocs_gen_files.open(dst, "w") as f_vizzu: f_vizzu.write(VizzuUrl.TEMPLATE.format(vizzu=Vizzu.get_vizzu_backend_url())) def main() -> None: with chdir(REPO_PATH): VizzuUrl.generate("assets/javascripts/vizzu.js") main() tools/docs/reference/gen_reference.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path import sys from types import ModuleType import mkdocs_gen_files # type: ignore import ipyvizzu REPO_PATH = Path(__file__).parent / ".." / ".." / ".." 
TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, VIZZU_SITE_URL, ) class Reference: @staticmethod def generate(package: ModuleType, folder: str) -> None: for path in sorted(Path("src").rglob("*.py")): module_path = path.relative_to("src").with_suffix("") doc_path = path.relative_to("src").with_suffix(".md") full_doc_path = Path(folder, doc_path) parts = tuple(module_path.parts) if parts[-1] == "__main__": continue if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") item = ".".join(parts) if item == package.__name__: mkdocs_gen_files.set_edit_path(full_doc_path, ".." / path) with mkdocs_gen_files.open(full_doc_path, "w") as f_md: f_md.write(f"{package.__doc__}\n") for item in package.__all__: f_md.write(f"::: {package.__name__}.{item}\n") f_md.write(" options:\n") f_md.write(" show_root_members_full_path: false\n") else: mkdocs_gen_files.set_edit_path(full_doc_path, ".." / path) with mkdocs_gen_files.open(full_doc_path, "w") as f_md: f_md.write(f"::: {item}") @staticmethod def generate_version_script(file: str) -> None: with mkdocs_gen_files.open(file, "w") as f_js: vizzu_version = Vizzu.get_vizzu_version() f_js.write( f""" document.addEventListener("DOMContentLoaded", (event) => {{ if (window.location.href.includes("/reference/")) {{ const links = document.links; for (let i = 0; i < links.length; i++) {{ if ( links[i].hostname !== window.location.hostname && links[i].href.includes("{VIZZU_SITE_URL}") ) {{ links[i].href = links[i].href.replace("latest", "{vizzu_version}"); }} }} }} }}); """ ) def main() -> None: with chdir(REPO_PATH): Reference.generate(ipyvizzu, "reference") Reference.generate_version_script("assets/javascripts/codereflinks.js") main() tools/docs/pages/gen_pages.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import os from pathlib import Path from typing import Union, Optional, List import sys import mkdocs_gen_files # type: ignore REPO_PATH = Path(__file__).parent / ".." / ".." / ".." 
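# Illustrative note for the IndexPages helper defined below (the nav entry
# names are hypothetical): an entry such as "tutorial/data.md" listed under
# "tutorial/index.md" is rendered into the section index as
#   * [Data](data.md)
# i.e. the last path component is capitalized and the link is made relative
# to the index file's directory.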
TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" sys.path.insert(0, str(TOOLS_PATH / "modules")) sys.path.insert(0, str(TOOLS_PATH / "ci")) sys.path.insert(0, str(MKDOCS_PATH)) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) from markdown_format import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Markdown, ) from config import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order MkdocsConfig, ) class IndexPages: # pylint: disable=too-few-public-methods @staticmethod def _write_index_file(file: str, toc: list) -> None: for item in toc: if isinstance(item, str): IndexPages._write_str_index(file, item) elif isinstance(item, dict): IndexPages._write_dict_index(file, item) else: raise NotImplementedError(f"{item}") @staticmethod def _write_str_index(file: str, item: str) -> None: with mkdocs_gen_files.open(file, "a") as f_index: parts = item.split("/") part = parts[-1].replace(".md", "").capitalize() link = Path(os.path.relpath(item, Path(file).parent)) f_index.write(f"* [{part}]({link})\n") @staticmethod def _write_dict_index(file: str, item: dict) -> None: with mkdocs_gen_files.open(file, "a") as f_index: for key in item: if isinstance(item[key], str): link = Path(os.path.relpath(item[key], Path(file).parent)) f_index.write(f"* [{key}]({link})\n") continue if item[key] and isinstance(item[key], list): if isinstance(item[key][0], str): if item[key][0].endswith("index.md"): link = Path( os.path.relpath(item[key][0], Path(file).parent) ) f_index.write(f"* [{key}]({link})\n") continue raise NotImplementedError(f"{item}") @staticmethod def generate( nav_item: Union[list, dict, str], skip: Optional[List[str]] = None ) -> None: if isinstance(nav_item, list): if ( nav_item and isinstance(nav_item[0], str) and nav_item[0].endswith("index.md") ): if not skip or nav_item[0] not in skip: original = Path("docs", nav_item[0]) if original.exists(): mkdocs_gen_files.set_edit_path(nav_item[0], nav_item[0]) with mkdocs_gen_files.open(nav_item[0], "a") as f_index: f_index.write("\n") IndexPages._write_index_file(file=nav_item[0], toc=nav_item[1:]) for item in nav_item: IndexPages.generate(nav_item=item, skip=skip) elif isinstance(nav_item, dict): for key in nav_item: IndexPages.generate(nav_item=nav_item[key], skip=skip) class Page: # pylint: disable=too-few-public-methods @staticmethod def generate(src: Path, dst: str, pos: str, site: str, keep: bool = False) -> None: with open(src, "rt", encoding="utf8") as f_src: content = f_src.read() content = content.replace(f"{site}/latest/", pos).replace(f"{site}/latest", pos) if dst == "index.md": example = "./showcases/titanic/titanic.csv" content = content.replace(example, f"{site}/latest/{example[2:]}") content = Vizzu.set_version(content) if keep: content = f"<pre>{content}</pre>" mkdocs_gen_files.set_edit_path(dst, ".." 
/ Path(dst).parent / Path(src).name) with mkdocs_gen_files.open(dst, "w") as f_dst: f_dst.write(content) class Docs: # pylint: disable=too-few-public-methods @staticmethod def generate(skip: Optional[List[str]] = None) -> None: docs_path = REPO_PATH / "docs" for path in list(docs_path.rglob("*.md")) + list(docs_path.rglob("*.js")): if skip and path.name in skip: continue with open(path, "rt", encoding="utf8") as f_src: dst = path.relative_to(docs_path) content = f_src.read() if path.suffix == ".md": content = Vizzu.set_version(content) content = Markdown.format(content) mkdocs_gen_files.set_edit_path(dst, dst) with mkdocs_gen_files.open(dst, "w") as f_dst: f_dst.write(content) def main() -> None: with chdir(REPO_PATH): config = MkdocsConfig.load(MKDOCS_PATH / "mkdocs.yml") Docs.generate() IndexPages.generate( nav_item=config["nav"], skip=["examples/index.md", "examples/analytical_operations/index.md"], ) Page.generate( src=REPO_PATH / "README.md", dst="index.md", pos="./", site=config["site_url"], ) Page.generate( src=REPO_PATH / "CONTRIBUTING.md", dst="CONTRIBUTING.md", pos="../", site=config["site_url"], ) Page.generate( src=REPO_PATH / "CODE_OF_CONDUCT.md", dst="CODE_OF_CONDUCT.md", pos="../", site=config["site_url"], ) Page.generate( src=REPO_PATH / "LICENSE", dst="LICENSE.md", pos="../", site=config["site_url"], keep=True, ) main() tools/docs/examples/gen_examples.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import os from pathlib import Path import re import sys from typing import List, Dict, Optional, Union, Tuple import mkdocs_gen_files import markdown REPO_PATH = Path(__file__).parent / ".." / ".." / ".." TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" GEN_PATH = MKDOCS_PATH / "examples" VIZZU_LIB_PATH = REPO_PATH / "vizzu-lib" WEB_CONTENT_PATH = ( VIZZU_LIB_PATH / "test" / "integration" / "test_cases" / "web_content" ) TEST_DATA_PATH = VIZZU_LIB_PATH / "test" / "integration" / "test_data" STATIC_EXAMPLES_PATH = WEB_CONTENT_PATH / "static" OPERATION_EXAMPLES_PATH = WEB_CONTENT_PATH / "analytical_operations" PRESET_EXAMPLES_PATH = WEB_CONTENT_PATH / "presets" SHOWCASES_PATH = REPO_PATH / "docs" / "showcases" JS_ASSETS_PATH = "assets/javascripts" sys.path.insert(0, str(TOOLS_PATH / "modules")) sys.path.insert(0, str(TOOLS_PATH / "ci")) sys.path.insert(0, str(MKDOCS_PATH)) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from node import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Node, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, VIZZU_SITE_URL, ) from markdown_format import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Markdown, ) from config import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order MkdocsConfig, ) class GenExamples: # pylint: disable=too-many-instance-attributes datafiles: Dict[str, bool] = {} datafile_re = re.compile(r"test_data\/(\w*).mjs") dataname_re = re.compile(r"import\s*\{\s*(.*)\s*}") title_re = re.compile(r'title\:\s"(.*)"') def __init__(self, name: str, src: Path, dst: str) -> None: self._name = name self._src = src self._dst = dst self._depth = len(dst.split("/")) + 1 self._generated: List[str] = [] self._indices: List[str] = [] self._merge_subfolders = False self._video_thumbnails = False self._blocked: List[str] = [] @property def merge_subfolders(self) -> bool: return 
self._merge_subfolders @merge_subfolders.setter def merge_subfolders(self, value: bool) -> None: self._merge_subfolders = value @property def video_thumbnails(self) -> bool: return self._video_thumbnails @video_thumbnails.setter def video_thumbnails(self, value: bool) -> None: self._video_thumbnails = value @property def blocked(self) -> List[str]: return self._blocked @blocked.setter def blocked(self, items: List[str]) -> None: self._blocked = items @staticmethod def _get_content(item: Path) -> str: with open(item, "r", encoding="utf8") as fh_item: return fh_item.read() def _get_title(self, item: Path, sub: Optional[str] = None) -> str: drop = ["Existingmeasure", "Newmeasure"] conjunction = ["and", "from", "to", "with"] title_parts = str(item.stem).split("_") contains_conjunction = False for index, title_part in enumerate(title_parts): if title_part in conjunction: contains_conjunction = True continue title_parts[index] = title_part.capitalize() title_parts[0] = title_parts[0].capitalize() if title_parts[0] in drop: title_parts = title_parts[1:] title = " ".join(title_parts) if not contains_conjunction: if title_parts[-1].isdigit(): title = " ".join( title_parts[1:-1] + [title_parts[0]] + [title_parts[-1]] ) else: title = " ".join(title_parts[1:] + [title_parts[0]]) if sub: title = " ".join([title, sub.capitalize()]) title = title.replace("plot", " Plot") return title def _get_datafile(self, item: Path, content: str) -> str: datafiles = re.findall(GenExamples.datafile_re, content) if not datafiles or len(datafiles) > 1: raise ValueError(f"failed to find datafile {item}") datafile = "".join(datafiles) return datafile def _get_dataname(self, item: Path, content: str) -> str: datanames = re.findall(GenExamples.dataname_re, content) if not datanames or len(datanames) > 1: raise ValueError(f"failed to find dataname {item}") dataname = "".join(datanames) dataname = dataname.strip() return dataname def _get_sub(self, path: Path) -> Optional[str]: sub = os.path.relpath(path.parent, self._src) if self._merge_subfolders and sub != ".": return sub return None def _create_index(self, dst: str, depth: int, title: str) -> str: index = "/".join([self._dst, dst]) if dst != "." 
else self._dst assets = "/".join([".."] * (depth - 1)) if index not in self._indices: with mkdocs_gen_files.open(f"{index}/index.md", "a") as fh_index: meta = """---\nhide:\n - toc\n---""" fh_index.write(f"{meta}\n\n") fh_index.write(f"# {title}\n") fh_index.write( f'<script src="{assets + "/" + JS_ASSETS_PATH}/thumbs.js"></script>\n' ) self._indices.append(index) return index def _add_index_item( # pylint: disable=too-many-arguments self, index: str, title: str, href: str, thumbnail: Optional[str] = None, figcaption: bool = False, ) -> None: if not thumbnail: thumbnail = href if self._video_thumbnails: self._add_video(index, title, href, thumbnail, figcaption) else: self._add_image(index, title, href, thumbnail) def _add_index_sub_menus(self, index: str, groups: dict) -> None: with mkdocs_gen_files.open(f"{index}/index.md", "a") as fh_index: fh_index.write("<pre>") sorted_items = {} items = [] for group_title in groups.keys(): number = groups[group_title]["number"] if number: sorted_items[number] = group_title else: items.append(group_title) numbers = list(sorted_items.keys()) numbers.sort() numbers.reverse() for number in numbers: items.insert(0, sorted_items[number]) for group_title in items: self._add_index_item( index, group_title, groups[group_title]["dst"], groups[group_title]["dst"] + "/" + groups[group_title]["item"].stem, True, ) with mkdocs_gen_files.open(f"{index}/index.md", "a") as fh_index: fh_index.write("</pre>\n") def _add_image(self, index: str, title: str, href: str, thumbnail: str) -> None: with mkdocs_gen_files.open(f"{index}/index.md", "a") as fh_index: url = f"{VIZZU_SITE_URL}/{Vizzu.get_vizzu_version()}/{index}" fh_index.write( "[" + f"![{title}]" + f"({url}/{thumbnail}.png)" + f"{{ class='image-gallery', title='{title}' }}" + "]" + f"(./{href}.md)\n" ) def _add_video( # pylint: disable=too-many-arguments self, index: str, title: str, href: str, thumbnail: str, figcaption: bool ) -> None: url = f"{VIZZU_SITE_URL}/{Vizzu.get_vizzu_version()}/{index}" with mkdocs_gen_files.open(f"{index}/index.md", "a") as fh_index: html = [] html.append(f"<a href='./{href}/' title='{title}'>") if figcaption: html.append("<figure markdown class='image-figure'>") html.append( "<video nocontrols autoplay muted loop class='image-gallery-w-caption'" ) else: html.append( "<video nocontrols autoplay muted loop class='image-gallery'" ) html.append(f" src='{url}/{thumbnail}.webm'") html.append(" type='video/webm'>") html.append(f" src='{url}/{thumbnail}.mp4'") html.append(" type='video/mp4'>") html.append("</video>") if figcaption: html.append(f"<figcaption class='image-caption'>{title}</figcaption>") html.append("</figure>") html.append("</a>\n") fh_index.write("".join(html)) @staticmethod def _find_dict_with_value( data: Union[dict, list], val: str ) -> Tuple[Optional[str], Optional[int]]: if isinstance(data, dict): return GenExamples._find_dict_with_value_in_dict(data, val) if isinstance(data, list): return GenExamples._find_dict_with_value_in_list(data, val) return None, None @staticmethod def _find_dict_with_value_in_dict( data: dict, val: str ) -> Tuple[Optional[str], Optional[int]]: for key, value in data.items(): if isinstance(value, dict): if len(value) == 1 and val in value.values(): return key, None nested_result = GenExamples._find_dict_with_value(value, val) if nested_result != (None, None): return nested_result if isinstance(value, list): nested_result = GenExamples._find_dict_with_value(value, val) if nested_result != (None, None): return nested_result if value == val: return 
key, None return None, None @staticmethod def _find_dict_with_value_in_list( data: list, val: str ) -> Tuple[Optional[str], Optional[int]]: for i, item in enumerate(data): if isinstance(item, dict) and len(item) == 1 and val in item.values(): return list(item.keys())[0], i if isinstance(item, (list, dict)): nested_result = GenExamples._find_dict_with_value(item, val) if nested_result != (None, None): return nested_result if item == val: return None, i return None, None @staticmethod def _generate_example_data(datafile: str, dataname: str) -> None: datakey = "_".join([datafile, dataname]) if dataname == "data": datakey = datafile if datakey not in GenExamples.datafiles: GenExamples.datafiles[datakey] = True datacontent = GenExamples._get_content(TEST_DATA_PATH / f"{datafile}.mjs") with mkdocs_gen_files.open(f"assets/data/{datafile}.js", "w") as fh_data: fh_data.write(datacontent) content = Node.node( True, GEN_PATH / "mjs2csv.mjs", f"{TEST_DATA_PATH}/{datafile}.mjs", dataname, ) with mkdocs_gen_files.open(f"assets/data/{datakey}.csv", "w") as f_example: f_example.write(content) def _generate_example_js( # pylint: disable=too-many-arguments self, item: Path, item_name: str, dst: str, depth: int, datafile: str, dataname: str, ) -> None: params = [str(item), "/".join([".."] * depth), datafile, dataname] content = Node.node(True, GEN_PATH / "mjs2js.mjs", *params) with mkdocs_gen_files.open( f"{self._dst}/{dst}/{item_name}/main.js", "w" ) as f_example: f_example.write(content) def _generate_example_md( # pylint: disable=too-many-arguments self, item: Path, item_name: str, dst: str, depth: int, datafile: str, dataname: str, title: str, ) -> None: params = [ str(item), str(TEST_DATA_PATH), "/".join([".."] * depth), datafile, dataname, title, ] content = Node.node(True, GEN_PATH / "mjs2md.mjs", *params) content = Vizzu.set_version(content) content = Markdown.format(content) with mkdocs_gen_files.open( f"{self._dst}/{dst}/{item_name}.md", "w" ) as f_example: f_example.write(content) def _generate_example( # pylint: disable=too-many-arguments self, item: Path, item_name: str, dst: str, depth: int, datafile: str, dataname: str, title: str, ) -> None: self._generate_example_md( item, item_name, dst, depth, datafile, dataname, title ) self._generate_example_js(item, item_name, dst, depth, datafile, dataname) GenExamples._generate_example_data(datafile, dataname) def generate(self) -> None: # pylint: disable=too-many-locals config = MkdocsConfig.load(MKDOCS_PATH / "mkdocs.yml") dst = "." depth = self._depth index = self._create_index(dst, depth, self._name) groups = {} items = list(self._src.rglob("*.mjs")) items.sort(key=lambda f: f.stem) for item in items: if item in self._blocked: continue item_name = item.stem dst = "." 
depth = self._depth sub_index = index sub = self._get_sub(item) if sub: item_name += f"_{sub}" if self._merge_subfolders: if item_name in self._generated: raise ValueError(f"example already exists {item_name}") self._generated.append(item_name) else: dst = os.path.relpath(item.parent, self._src) depth += dst.count("/") value = self._dst + "/" if dst != ".": depth += 1 value += dst + "/" group_title, number = GenExamples._find_dict_with_value( config["nav"], value ) if group_title and group_title not in groups: groups[group_title] = {"item": item, "dst": dst, "number": number} sub_index = self._create_index(dst, depth, group_title) # type: ignore content = GenExamples._get_content(item) datafile = self._get_datafile(item, content) dataname = self._get_dataname(item, content) title = self._get_title(item, sub) self._add_index_item(sub_index, title, item_name) self._generate_example( item, item_name, dst, depth, datafile, dataname, title ) self._add_index_sub_menus(index, groups) class GenShowcases(GenExamples): def generate(self) -> None: dst = "." depth = self._depth index = self._create_index(dst, depth, self._name) items = list(self._src.rglob("*.js")) + list(self._src.rglob("main.html")) for item in items: content = GenExamples._get_content(item) content = Vizzu.set_version(content) with mkdocs_gen_files.open( self._dst + "/" + os.path.relpath(item, SHOWCASES_PATH), "w" ) as fh_js: fh_js.write(content) items = list(self._src.rglob("*.md")) items.sort(key=lambda f: f.stem) for item in items: content = GenExamples._get_content(item) html = markdown.markdown(content) h1_titles = re.findall(r"<h1.*?>(.*?)</h1>", html) self._add_index_item(index, h1_titles[0], item.stem) def main() -> None: with chdir(REPO_PATH): presets = GenExamples( "Preset charts", PRESET_EXAMPLES_PATH, "examples/presets", ) presets.merge_subfolders = True presets.generate() static = GenExamples( "Static charts", STATIC_EXAMPLES_PATH, "examples/static", ) static.merge_subfolders = True static.generate() operations = GenExamples( "Analytical Operations", OPERATION_EXAMPLES_PATH, "examples/analytical_operations", ) operations.video_thumbnails = True operations.generate() real = GenShowcases( "Showcases", SHOWCASES_PATH, "showcases", ) real.video_thumbnails = True real.generate() main() src/ipyvizzu/integrations/fugue.py METASEP """ A module for Fugue integration. Example: Users should not instantiate this module directly. As long as you installed fugue and ipyvizzu, the extension is auto-registered. 
from fugue import fsql fsql(''' SELECT a, SUM(b) AS b FROM spark.table GROUP BY a ORDER BY b OUTPUT USING vizzu:bar(x="a", y="b", title="title") ''').run(spark_session) """ from typing import Any, Dict, Tuple import pandas as pd from fugue import DataFrames, Outputter # type: ignore from fugue.exceptions import FugueWorkflowError from fugue.extensions import namespace_candidate # type: ignore from fugue.plugins import parse_outputter # type: ignore from triad import assert_or_throw # type: ignore from ipyvizzu import Chart, Config, Data, DisplayTarget _TIMELINE_DEFAULT_CONF: Dict[str, Any] = dict( # pylint: disable=use-dict-literal show={"delay": 0}, hide={"delay": 0}, title={"duration": 0, "delay": 0}, duration=0.5, ) class _Visualize(Outputter): """ A Fugue outputter extension (majorly for Fugue SQL) Args: func: A function name of [Config][ipyvizzu.animation.Config] category: Can be preset or timeline """ def __init__(self, func: str, category: str) -> None: super().__init__() self._category = category self._func = getattr(Config, func) def process(self, dfs: DataFrames) -> None: assert_or_throw(len(dfs) == 1, FugueWorkflowError("not single input")) df = dfs[0].as_pandas() # pylint: disable=invalid-name if self._category == "timeline": self._process_timeline(df) else: self._process_preset(df) def _process_preset(self, df: pd.DataFrame) -> None: # pylint: disable=invalid-name data = Data() data.add_df(df) chart = Chart(display=DisplayTarget.END) chart.animate(data) chart.animate(self._func(dict(self.params))) def _process_timeline( self, df: pd.DataFrame # pylint: disable=invalid-name ) -> None: _p = dict(self.params) _pc = dict(_p.pop("config", {})) title = _pc.pop("title", "%s") key = _p.pop("by") conf = dict(_TIMELINE_DEFAULT_CONF) conf.update(_p) data = Data() chart = Chart(display=DisplayTarget.END) keys = df[key].unique() keys.sort() idx = pd.DataFrame({"_idx": range(len(keys)), key: keys}) df = df.sort_values(key).merge(idx) data.add_df(df) chart.animate(data) for i, key in enumerate(keys): _p2 = dict(_pc) _p2["title"] = (title % key) if "%s" in title else title chart.animate(Data.filter(f"record._idx == {i}"), self._func(_p2), **conf) @parse_outputter.candidate(namespace_candidate("vizzu", lambda x: isinstance(x, str))) def _parse_vizzu(obj: Tuple[str, str]) -> Outputter: if obj[1].startswith("timeline_"): return _Visualize(obj[1].split("_", 1)[1], "timeline") return _Visualize(obj[1], "preset") src/ipyvizzu/integrations/__init__.py METASEP """A module for integrations.""" src/ipyvizzu/data/type_alias.py METASEP """ This module provides typing aliases for data used in ipyvizzu. """ from typing import Dict, List, Sequence, Union DimensionValue = str """ Represents a value that can be either a string or a number, but both will be treated as strings. """ MeasureValue = Union[int, float] """ Represents a numerical value, which can be either an int or a float. """ NestedMeasureValues = Union[MeasureValue, List["NestedMeasureValues"]] """ Represents a nested structure of MeasureValues. It can be a single MeasureValue or a list containing other NestedMeasureValues. """ RecordValue = Union[DimensionValue, MeasureValue] """ Represents a value that can be either a DimensionValue or a MeasureValue. """ Record = Union[List[RecordValue], Dict[str, RecordValue]] """ Represents a Record, which is a collection of RecordValues. A Record can be represented as either a list of RecordValues or a dictionary where keys are series names and values are the corresponding RecordValues. 
""" SeriesValues = Union[Sequence[DimensionValue], Sequence[MeasureValue]] """ Represents a collection of values for a Series. It can be a list of DimensionValues or a list of MeasureValues. """ Series = Dict[str, Union[str, SeriesValues]] """ Represents a Series in a dictionary format. It consists of a name (string), an optional type (also a string), and a values key which contains a SeriesValues. """ src/ipyvizzu/data/infer_type.py METASEP """ This module provides the `InferType` class, which stores data infer types. """ from enum import Enum class InferType(Enum): """ An enum class for storing data infer types. Attributes: DIMENSION: An enum key-value for storing dimension infer type. Dimensions are categorical series that can contain strings and numbers, but both will be treated as strings. MEASURE: An enum key-value for storing measure infer type. Measures can only be numerical. """ DIMENSION: str = "dimension" MEASURE: str = "measure" src/ipyvizzu/data/__init__.py METASEP """ This module serves as a collection of data-related utilities, including converter classes, data infer types and data typing aliases. """ tools/modules/vizzu.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path import re import ipyvizzu REPO_PATH = Path(__file__).parent / ".." / ".." VIZZU_BACKEND_URL = "" VIZZU_STYLEREF_BACKEND_URL = "" IPYVIZZU_VERSION = "" VIZZU_VERSION = "" IPYVIZZU_SITE_URL = "https://ipyvizzu.vizzuhq.com" VIZZU_SITE_URL = "https://lib.vizzuhq.com" VIZZU_CDN_URL = "https://cdn.jsdelivr.net/npm/vizzu" class Vizzu: _ipyvizzu_version = "" _vizzu_version = "" @staticmethod def get_vizzu_backend_url() -> str: if VIZZU_BACKEND_URL: return VIZZU_BACKEND_URL version = Vizzu.get_vizzu_version() return f"{VIZZU_CDN_URL}@{version}/dist/vizzu.min.js" @staticmethod def get_vizzu_styleref_backend_url() -> str: if VIZZU_STYLEREF_BACKEND_URL: return VIZZU_STYLEREF_BACKEND_URL version = Vizzu.get_vizzu_version() return f"{VIZZU_CDN_URL}@{version}/dist/vizzu.min.js" @staticmethod def get_vizzu_version() -> str: if VIZZU_VERSION: return VIZZU_VERSION if not Vizzu._vizzu_version: cdn = ipyvizzu.Chart.VIZZU Vizzu._vizzu_version = re.search(r"vizzu@([\d.]+)/", cdn).group(1) # type: ignore return Vizzu._vizzu_version @staticmethod def get_ipyvizzu_version() -> str: if IPYVIZZU_VERSION: return IPYVIZZU_VERSION if not Vizzu._ipyvizzu_version: version = ipyvizzu.__version__ Vizzu._ipyvizzu_version = re.search(r"(\d+.\d+).\d+", version).group(1) # type: ignore return Vizzu._ipyvizzu_version @staticmethod def set_version(content: str, restore: bool = False) -> str: vizzu_version = Vizzu.get_vizzu_version() ipyvizzu_version = Vizzu.get_ipyvizzu_version() if not restore: content = content.replace( f"{IPYVIZZU_SITE_URL}/latest/", f"{IPYVIZZU_SITE_URL}/{ipyvizzu_version}/", ) content = content.replace( f"{VIZZU_SITE_URL}/latest/", f"{VIZZU_SITE_URL}/{vizzu_version}/", ) else: content = content.replace( f"{IPYVIZZU_SITE_URL}/{ipyvizzu_version}/", f"{IPYVIZZU_SITE_URL}/latest/", ) content = content.replace( f"{VIZZU_SITE_URL}/{vizzu_version}/", f"{VIZZU_SITE_URL}/latest/", ) return content tools/modules/node.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path from subprocess import PIPE, Popen from typing import Union class Node: @staticmethod def node(strict: bool, script: Union[str, Path], *params: str) -> str: return Node.run(strict, "node", script, *params) 
@staticmethod def npx(strict: bool, script: Union[str, Path], *params: str) -> str: return Node.run(strict, "npx", script, *params) @staticmethod def run(strict: bool, exe: str, script: Union[str, Path], *params: str) -> str: with Popen( [exe, script, *params], stdin=PIPE, stdout=PIPE, stderr=PIPE, ) as node: outs, errs = node.communicate() if errs: print(errs.decode()) if node.returncode or (strict and errs): raise RuntimeError(f"failed to run {Path(script).stem}") return outs.decode() tools/modules/chdir.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import os from pathlib import Path import sys from typing import Union if sys.version_info >= (3, 11): from contextlib import chdir # pylint: disable=unused-import else: # TODO: remove once support for Python 3.10 is dropped from contextlib import contextmanager @contextmanager def chdir(path: Union[str, Path]): old_wd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(old_wd) tools/docs/deploy.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path from subprocess import Popen import sys REPO_PATH = Path(__file__).parent / ".." / ".." TOOLS_PATH = REPO_PATH / "tools" MKDOCS_PATH = TOOLS_PATH / "docs" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) class Deploy: latest: bool = True @staticmethod def mike() -> None: version = Vizzu.get_ipyvizzu_version() params = [ "mike", "deploy", ] if Deploy.latest: params.append("-u") params.append(version) if Deploy.latest: params.append("latest") params.append("-F") params.append("tools/docs/mkdocs.yml") with Popen( params, ) as process: process.communicate() if process.returncode: raise RuntimeError("failed to run mike") @staticmethod def set_config(restore: bool) -> None: with open(MKDOCS_PATH / "mkdocs.yml", "r", encoding="utf8") as fh_readme: content = fh_readme.read() if not restore: if not Deploy.latest: content = content.replace( "- content.action.edit", "# - content.action.edit", ) else: if not Deploy.latest: content = content.replace( "# - content.action.edit", "- content.action.edit", ) with open(MKDOCS_PATH / "mkdocs.yml", "w", encoding="utf8") as fh_readme: fh_readme.write(content) def main() -> None: with chdir(REPO_PATH): Deploy.set_config(restore=False) Deploy.mike() Deploy.set_config(restore=True) main() tools/docs/config.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path from typing import Optional import yaml class MkdocsConfig: # pylint: disable=too-few-public-methods @staticmethod def _format_url(url: Optional[str]) -> Optional[str]: if url and url.endswith("/"): return url[:-1] return url @staticmethod def _format(config: dict) -> dict: if "site_url" in config: config["site_url"] = MkdocsConfig._format_url(config["site_url"]) return config @staticmethod def load(config: Path) -> dict: with open(config, "rt", encoding="utf8") as f_yml: return MkdocsConfig._format(yaml.load(f_yml, Loader=yaml.FullLoader)) tools/ci/version.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import json from pathlib import Path import sys REPO_PATH = Path(__file__).parent / ".." / ".." 
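# The Version helper defined below rewrites README.md and every *.py file
# under src/ through Vizzu.set_version(), replacing "latest" documentation
# URLs with the pinned ipyvizzu/vizzu version numbers, or restoring "latest"
# when the restore flag (sys.argv[1]) is true.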
TOOLS_PATH = REPO_PATH / "tools" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) class Version: @staticmethod def set_readme_version(restore: bool) -> None: with open("README.md", "r", encoding="utf8") as fh_readme: content = fh_readme.read() content = Vizzu.set_version(content, restore) with open("README.md", "w", encoding="utf8") as fh_readme: fh_readme.write(content) @staticmethod def set_src_version(restore: bool) -> None: for item in (REPO_PATH / "src").rglob("*.py"): with open(item, "r", encoding="utf8") as fh_item: content = fh_item.read() content = Vizzu.set_version(content, restore) with open(item, "w", encoding="utf8") as fh_item: fh_item.write(content) def main() -> None: with chdir(REPO_PATH): restore = json.loads(sys.argv[1].lower()) Version.set_readme_version(restore) Version.set_src_version(restore) main() tools/ci/std_check.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import sys import subprocess def main() -> None: with subprocess.Popen( sys.argv[1:], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) as process: out, err = process.communicate() if out or err or process.returncode: if out: print(out.decode()) if err: print(err.decode()) raise RuntimeError(f"failed to run {sys.argv[1]}") main() tools/ci/markdown_format.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import mdformat class Markdown: # pylint: disable=too-few-public-methods @staticmethod def format(content: str) -> str: return mdformat.text( # type: ignore content, options={"wrap": 80, "end-of-line": "keep", "line-length": 70}, extensions={ "gfm", "tables", "footnote", "frontmatter", "configblack", "admonition", }, codeformatters={ "python", "bash", "sh", "json", "toml", "yaml", "javascript", "js", "css", "html", "xml", }, ) tools/ci/get_tag.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from pathlib import Path import re import sys import requests REPO_PATH = Path(__file__).parent / ".." / ".." 
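# This script queries the GitHub tag list of vizzuhq/vizzu-lib and prints the
# newest tag that matches the vizzu version pinned in ipyvizzu, to be used as
# a checkout ref, e.g. (hypothetical values) a pinned "0.9" with existing tags
# v0.9.0..v0.9.2 would print "v0.9.2".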
TOOLS_PATH = REPO_PATH / "tools" sys.path.insert(0, str(TOOLS_PATH / "modules")) from chdir import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order chdir, ) from vizzu import ( # pylint: disable=import-error, wrong-import-position, wrong-import-order Vizzu, ) OWNER = "vizzuhq" REPO = "vizzu-lib" if __name__ == "__main__": with chdir(REPO_PATH): vizzu_version = Vizzu.get_vizzu_version() api_url = f"https://api.github.com/repos/{OWNER}/{REPO}/tags" response = requests.get(api_url, timeout=10) response.raise_for_status() tags = response.json() patch_versions = [ int( re.search( rf"^v{re.escape(vizzu_version)}\.(\d+)", tag["name"] # type: ignore ).group(1) ) for tag in tags if re.search(rf"^v{re.escape(vizzu_version)}\.(\d+)", tag["name"]) ] latest_patch_version = max(patch_versions) checkout_ref = f"v{vizzu_version}.{latest_patch_version}" print(checkout_ref) src/ipyvizzu/template.py METASEP """A module for storing the JavaScript templates.""" from enum import Enum class ChartProperty(Enum): """An enum class for storing chart properties.""" CONFIG = "config" """An enum key-value for storing config chart property.""" STYLE = "style" """An enum key-value for storing style chart property.""" class DisplayTarget(Enum): """An enum class for storing chart display options.""" BEGIN = "begin" """Display all animation steps after the constructor's cell.""" END = "end" """Display all animation steps after the last running cell.""" ACTUAL = "actual" """Display the actual animation step after the currently running cell.""" MANUAL = "manual" """Display all animation steps after calling a show method.""" class DisplayTemplate: """A class for storing JavaScript snippet templates.""" # pylint: disable=too-few-public-methods IPYVIZZUJS: str = "{ipyvizzujs}" """ipyvizzu JavaScript class.""" INIT: str = ( "window.ipyvizzu.createChart(element, " + "'{chart_id}', '{vizzu}', '{div_width}', '{div_height}');" ) """Call createChart JavaScript method.""" CHANGE_ANALYTICS_TO: str = ( "if (window.IpyVizzu) window.IpyVizzu.changeAnalyticsTo({analytics});" ) """Call changeAnalyticsTo JavaScript method.""" ANIMATE: str = ( "window.ipyvizzu.animate(element, " + "'{chart_id}', '{anim_id}', '{display_target}', {scroll}, " + "lib => {{ return {chart_target} }}, {chart_anim_opts});" ) """Call animate JavaScript method.""" FEATURE: str = ( "window.ipyvizzu.feature(element, '{chart_id}', '{name}', {enabled});" ) """Call feature JavaScript method.""" STORE: str = "window.ipyvizzu.store(element, '{chart_id}', '{id}');" """Call store JavaScript method.""" SET_EVENT: str = ( "window.ipyvizzu.setEvent(element, " + "'{chart_id}', '{id}', '{event}', event => {{ {handler} }});" ) """Call setEvent JavaScript method.""" CLEAR_EVENT: str = ( "window.ipyvizzu.clearEvent(element, '{chart_id}', '{id}', '{event}');" ) """Call clearEvent JavaScript method.""" LOG: str = "window.ipyvizzu.log(element, '{chart_id}', '{chart_property}');" """Call log JavaScript method.""" CONTROL: str = "window.ipyvizzu.control(element, '{method}', {params});" """Call animation control JavaScript methods.""" CLEAR_INHIBITSCROLL: str = ( "if (window.IpyVizzu) { window.IpyVizzu.clearInhibitScroll(element); }" ) """Call clearInhibitScroll JavaScript method if ipyvizzu JavaScript class exists.""" src/ipyvizzu/schema.py METASEP """A module for storing the data schema.""" NAMED_SCHEMA: dict = { "type": "array", "items": { "type": "object", "properties": { "name": {"type": "string"}, "values": {"type": "array", "optional": True}, "type": {"type": 
"string", "optional": True}, }, "required": ["name"], }, } """Store the schema of the `series`, `dimensions` and `measures` data types.""" RECORD_SCHEMA: dict = { "type": "array", "items": {"anyOf": [{"type": "array"}, {"type": "object"}]}, } """Store the schema of the `records` data type.""" DATA_SCHEMA: dict = { "type": "object", "oneOf": [ { "properties": { "series": NAMED_SCHEMA, "records": RECORD_SCHEMA, "filter": {"optional": True}, }, "additionalProperties": False, }, { "properties": { "dimensions": NAMED_SCHEMA, "measures": NAMED_SCHEMA, "filter": {"optional": True}, }, "additionalProperties": False, "required": ["dimensions", "measures"], }, ], } """Store the schema of the data animation.""" src/ipyvizzu/method.py METASEP """A module for working with template methods.""" import json from typing import Optional from ipyvizzu.animation import AbstractAnimation, PlainAnimation from ipyvizzu.event import EventHandler from ipyvizzu.template import ChartProperty class Method: """A class for storing and dumping any kind of data.""" # pylint: disable=too-few-public-methods _data: dict def dump(self) -> dict: """ A method for returning the stored data. Returns: The stored data. """ return self._data class Animate(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.ANIMATE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__( self, chart_target: AbstractAnimation, chart_anim_opts: Optional[dict] = None, ): """ Animate constructor. It stores and dumps `chart_target` and `chart_anim_opts` parameters. Args: chart_target: AbstractAnimation inherited object such as [Data][ipyvizzu.animation.Data] [Config][ipyvizzu.animation.Config] or [Style][ipyvizzu.animation.Style]. chart_anim_opts: Animation options' dictionary. If it is not set, it dumps `undefined`. """ self._data = { "chart_target": chart_target.dump(), "chart_anim_opts": PlainAnimation(chart_anim_opts).dump() if chart_anim_opts else "undefined", } class Feature(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.FEATURE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, name: str, enabled: bool): """ Feature constructor. It stores and dumps `name` and `enabled` parameters. Args: name: The name of a chart feature. enabled: The new state of a chart feature. """ self._data = {"name": name, "enabled": json.dumps(enabled)} class Store(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.STORE][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, snapshot_id: str): """ Store constructor. It stores and dumps `snapshot_id` parameter. Args: snapshot_id: The id of snapshot object. """ self._data = {"id": snapshot_id} class EventOn(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.SET_EVENT][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, event_handler: EventHandler): """ EventOn constructor. It stores and dumps the `id`, the `event` and the `handler` of the event handler object. Args: event_handler: An event handler object. """ self._data = { "id": event_handler.id, "event": event_handler.event, "handler": event_handler.handler, } class EventOff(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.CLEAR_EVENT][ipyvizzu.template.DisplayTemplate] template. 
""" # pylint: disable=too-few-public-methods def __init__(self, event_handler: EventHandler): """ EventOff constructor. It stores and dumps the `id` and the `event` of the event handler object. Args: event_handler: An event handler object. """ self._data = {"id": event_handler.id, "event": event_handler.event} class Log(Method): """ A class for dumping chart independent parameters to [DisplayTemplate.LOG][ipyvizzu.template.DisplayTemplate] template. """ # pylint: disable=too-few-public-methods def __init__(self, chart_property: ChartProperty): """ Log constructor. It stores and dumps the value of the chart property object. Args: chart_property: A chart property such as [CONFIG][ipyvizzu.template.ChartProperty] and [STYLE][ipyvizzu.template.ChartProperty]. """ self._data = {"chart_property": chart_property.value} src/ipyvizzu/json.py METASEP """A module for working JavaScript code in json convertible objects.""" import json from typing import Any, Optional import uuid class RawJavaScript: """A class for representing raw JavaScript code.""" # pylint: disable=too-few-public-methods def __init__(self, raw: Optional[str]): """ RawJavaScript constructor. It stores raw JavaScript code as a string. Args: raw: JavaScript code as `str`. """ self._raw = raw @property def raw(self) -> Optional[str]: """ A property for storing raw JavaScript code as a string. Returns: Raw JavaScript code as `str`. """ return self._raw class RawJavaScriptEncoder(json.JSONEncoder): """ A class for representing a custom json encoder, it can encode objects that contain [RawJavaScript][ipyvizzu.json.RawJavaScript] values. """ def __init__(self, *args, **kwargs): """ RawJavaScriptEncoder constructor. It extends [JSONEncoder][json.JSONEncoder] with an instance variable (`_raw_replacements`). The `_raw_replacements` dictionary stores the `uuids` and JavaScript codes of the [RawJavaScript][ipyvizzu.json.RawJavaScript] objects. """ json.JSONEncoder.__init__(self, *args, **kwargs) self._raw_replacements = {} def default(self, o: Any): """ Overrides [JSONEncoder.default][json.JSONEncoder.default] method. It replaces [RawJavaScript][ipyvizzu.json.RawJavaScript] object with `uuid` and it stores raw JavaScript code with `uuid` key in the `_raw_replacements` dictionary. """ if isinstance(o, RawJavaScript): key = uuid.uuid4().hex self._raw_replacements[key] = o.raw return key return json.JSONEncoder.default(self, o) def encode(self, o: Any): """ Overrides [JSONEncoder.encode][json.JSONEncoder.encode] method. It replaces `uuids` with raw JavaScript code without apostrophes. """ result = json.JSONEncoder.encode(self, o) for key, val in self._raw_replacements.items(): result = result.replace(f'"{key}"', val) return result src/ipyvizzu/event.py METASEP """A module for working with JavaScript events""" import uuid class EventHandler: """A class for representing an event handler.""" def __init__(self, event: str, handler: str): """ EventHandler constructor. It generates a uuid for the event handler, stores the event type and the body of the handler function. Args: event: The type of the event. handler: The body of the handler function. """ self._id = uuid.uuid4().hex[:7] self._event = event self._handler = " ".join(handler.split()) @property def id(self) -> str: # pylint: disable=invalid-name """ A property for storing an id. Returns: The uuid of the event handler. """ return self._id @property def event(self) -> str: """ A property for storing an event type. Returns: The type of the event. 
""" return self._event @property def handler(self) -> str: """ A property for storing an event handler function. Returns: The body of the handler function. """ return self._handler src/ipyvizzu/chart.py METASEP """A module for working with Vizzu charts.""" import pkgutil import uuid from typing import List, Optional, Union from IPython.display import display_javascript # type: ignore from IPython import get_ipython # type: ignore from ipyvizzu.animation import AbstractAnimation, Snapshot, AnimationMerger from ipyvizzu.animationcontrol import AnimationControl from ipyvizzu.method import Animate, Feature, Store, EventOn, EventOff, Log from ipyvizzu.template import ChartProperty, DisplayTarget, DisplayTemplate from ipyvizzu.event import EventHandler from ipyvizzu.__version__ import __version__ class Chart: """A class for representing a wrapper over Vizzu chart.""" # pylint: disable=too-many-instance-attributes VIZZU: str = "https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js" """A variable for storing the default url of vizzu package.""" def __init__( self, vizzu: str = VIZZU, width: str = "800px", height: str = "480px", display: Union[DisplayTarget, str] = DisplayTarget.ACTUAL, ): """ Chart constructor. Args: vizzu: The url of Vizzu JavaScript package. width: The width of the chart. height: The height of the chart. display: The display behaviour of the chart. """ self._chart_id: str = uuid.uuid4().hex[:7] self._vizzu: str = vizzu self._width: str = width self._height: str = height self._display_target: DisplayTarget = DisplayTarget(display) self._calls: List[str] = [] self._last_anim: Optional[str] = None self._showed: bool = False self._initialized: bool = False self._analytics: bool = True self._scroll_into_view: bool = False @staticmethod def _register_events() -> None: ipy = get_ipython() if ipy is not None: ipy.events.register("pre_run_cell", Chart._register_pre_run_cell) @staticmethod def _register_pre_run_cell() -> None: display_javascript(DisplayTemplate.CLEAR_INHIBITSCROLL, raw=True) @property def analytics(self) -> bool: """ A property for enabling/disabling the usage statistics feature. The usage statistics feature allows aggregate usage data collection using Plausible's algorithm. Enabling this feature helps us follow the progress and overall trends of our library, allowing us to focus our resources effectively and better serve our users. We do not track, collect, or store any personal data or personally identifiable information. All data is isolated to a single day, a single site, and a single device only. Please note that even when this feature is enabled, publishing anything made with `ipyvizzu` remains GDPR compatible. Returns: The value of the property (default `True`). """ return self._analytics @analytics.setter def analytics(self, analytics: Optional[bool]) -> None: self._analytics = bool(analytics) if self._initialized: self._display_analytics() @property def scroll_into_view(self) -> bool: """ A property for turning on/off the scroll into view feature. Returns: The value of the property (default `False`). """ return self._scroll_into_view @scroll_into_view.setter def scroll_into_view(self, scroll_into_view: Optional[bool]) -> None: self._scroll_into_view = bool(scroll_into_view) @property def control(self) -> AnimationControl: """ A property for returning a control object of the last animation. Raises: AssertionError: If called before any animation plays. Returns: The control object of the last animation. 
""" assert self._last_anim, "must be used after an animation." return AnimationControl(self._chart_id, self._last_anim, self._display) def initializing(self) -> None: """A method for initializing the chart.""" if not self._initialized: self._initialized = True self._display_ipyvizzujs() self._display_analytics() if self._display_target != DisplayTarget.MANUAL: Chart._register_events() self._display_chart() def _display_ipyvizzujs(self) -> None: ipyvizzurawjs = pkgutil.get_data(__name__, "templates/ipyvizzu.js") ipyvizzujs = ipyvizzurawjs.decode("utf-8").replace( # type: ignore '"__version__"', f'"{__version__}"' ) self._display(DisplayTemplate.IPYVIZZUJS.format(ipyvizzujs=ipyvizzujs)) def _display_analytics(self) -> None: self._display( DisplayTemplate.CHANGE_ANALYTICS_TO.format( analytics=str(self._analytics).lower() ) ) def _display_chart(self) -> None: self._display( DisplayTemplate.INIT.format( chart_id=self._chart_id, vizzu=self._vizzu, div_width=self._width, div_height=self._height, ) ) def animate( self, *animations: AbstractAnimation, **options: Optional[Union[str, int, float, dict]], ) -> None: """ A method for changing the state of the chart. Args: *animations: List of AbstractAnimation inherited objects such as [Data][ipyvizzu.animation.Data], [Config][ipyvizzu.animation.Config] and [Style][ipyvizzu.animation.Style]. **options: Dictionary of animation options for example `duration=1`. For information on all available animation options see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/interfaces/Anim.Options/#properties). Raises: ValueError: If `animations` is not set. Example: Reset the chart styles: chart.animate(Style(None)) """ # pylint: disable=line-too-long if not animations: raise ValueError("No animation was set.") animation = AnimationMerger.merge_animations(animations) animate = Animate(animation, options) self._last_anim = uuid.uuid4().hex[:7] self._display( DisplayTemplate.ANIMATE.format( display_target=self._display_target.value, chart_id=self._chart_id, anim_id=self._last_anim, scroll=str(self._scroll_into_view).lower(), **animate.dump(), ) ) def feature(self, name: str, enabled: bool) -> None: """ A method for turning on/off features of the chart. Args: name: The name of the chart feature. For information on all available features see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/modules/#feature). enabled: The new state of the chart feature. Example: Turn on `tooltip` of the chart: chart.feature("tooltip", True) """ # pylint: disable=line-too-long self._display( DisplayTemplate.FEATURE.format( chart_id=self._chart_id, **Feature(name, enabled).dump(), ) ) def store(self) -> Snapshot: """ A method for saving and storing the actual state of the chart. Returns: A Snapshot object wich stores the actual state of the chart. Example: Save and restore the actual state of the chart: snapshot = chart.store() ... chart.animate(snapshot) """ snapshot_id = uuid.uuid4().hex[:7] self._display( DisplayTemplate.STORE.format( chart_id=self._chart_id, **Store(snapshot_id).dump() ) ) return Snapshot(snapshot_id) def on( # pylint: disable=invalid-name self, event: str, handler: str ) -> EventHandler: """ A method for creating and turning on an event handler. Args: event: The type of the event. For information on all available events see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/modules/Event/#type). handler: The JavaScript method of the event. Returns: The turned on event handler object. 
Example: Turn on an event handler which prints an alert message when someone clicks on the chart: handler = chart.on("click", "alert(JSON.stringify(event.data));") """ # pylint: disable=line-too-long event_handler = EventHandler(event, handler) self._display( DisplayTemplate.SET_EVENT.format( chart_id=self._chart_id, **EventOn(event_handler).dump(), ) ) return event_handler def off(self, event_handler: EventHandler) -> None: """ A method for turning off an event handler. Args: event_handler: A previously created event handler object. Example: Turn off a previously created event handler: chart.off(handler) """ self._display( DisplayTemplate.CLEAR_EVENT.format( chart_id=self._chart_id, **EventOff(event_handler).dump(), ) ) def log(self, chart_property: ChartProperty) -> None: """ A method for printing chart properties to the browser console. Args: chart_property: A chart property such as [CONFIG][ipyvizzu.template.ChartProperty] and [STYLE][ipyvizzu.template.ChartProperty]. Example: Log the actual style of the chart to the browser console: chart.log(ChartProperty.STYLE) """ self._display( DisplayTemplate.LOG.format( chart_id=self._chart_id, **Log(chart_property).dump() ) ) def _repr_html_(self) -> str: assert ( self._display_target == DisplayTarget.MANUAL ), "chart._repr_html_() can be used with display=DisplayTarget.MANUAL only" assert not self._showed, "cannot be used after chart displayed." self._showed = True if not self._initialized: return "" html_id = uuid.uuid4().hex[:7] script = ( self._calls[0] + "\n" + "\n".join(self._calls[1:]).replace( "element", f'document.getElementById("{html_id}")' ) ) return f'<div id="{html_id}"><script>{script}</script></div>' def show(self) -> None: """ A method for displaying the assembled JavaScript code. Raises: AssertionError: If [display][ipyvizzu.Chart.__init__] is not [DisplayTarget.MANUAL][ipyvizzu.template.DisplayTarget]. AssertionError: If chart already has been displayed. """ assert ( self._display_target == DisplayTarget.MANUAL ), "chart.show() can be used with display=DisplayTarget.MANUAL only" assert not self._showed, "cannot be used after chart displayed" display_javascript( "\n".join(self._calls), raw=True, ) self._showed = True def _display(self, javascript: str) -> None: if not self._initialized: self.initializing() if self._display_target != DisplayTarget.MANUAL: display_javascript( javascript, raw=True, ) else: assert not self._showed, "cannot be used after chart displayed" self._calls.append(javascript) src/ipyvizzu/animationcontrol.py METASEP """A module for working with animation control.""" from typing import Union, Callable import uuid from ipyvizzu.template import DisplayTemplate from ipyvizzu.animation import Animation class AnimationControl: """ A class for controlling animations. """ def __init__(self, prev_id: str, last_id: str, display_method: Callable): """ AnimationControl constructor. Args: prev_id: Id of the previous animation promise. last_id: Id of the animation to be controlled. display_method: Displaying function. 
""" self._ids = ", ".join([f"'{prev_id}'", f"'{last_id}'"]) self._display = display_method def cancel(self) -> None: """Cancels the animation, will reject the animation promise.""" self._display( DisplayTemplate.CONTROL.format( method="cancel", params=self._ids, ) ) def pause(self) -> None: """Pauses the controlled animation.""" self._display( DisplayTemplate.CONTROL.format( method="pause", params=self._ids, ) ) def play(self) -> None: """Plays/resumes playing of the controlled animation.""" self._display( DisplayTemplate.CONTROL.format( method="play", params=self._ids, ) ) def reverse(self) -> None: """Changes the direction of the controlled animation.""" self._display( DisplayTemplate.CONTROL.format( method="reverse", params=self._ids, ) ) def seek(self, value: Union[int, str]) -> None: """ Seeks the animation to the position specified by time or progress percentage. Args: value: The position specified by time or progress percentage. """ params = ", ".join([self._ids, f"'{value}'"]) self._display( DisplayTemplate.CONTROL.format( method="seek", params=params, ) ) def stop(self) -> None: """Stops the current animation seeking it back to its start position.""" self._display( DisplayTemplate.CONTROL.format( method="stop", params=self._ids, ) ) def store(self) -> Animation: """ A method for saving and storing the actual state of the animation. Returns: An `Animation` object wich stores the actual state of the animation. """ animation_id = uuid.uuid4().hex[:7] params = ", ".join([self._ids, f"'{animation_id}'"]) self._display( DisplayTemplate.CONTROL.format( method="store", params=params, ) ) return Animation(animation_id) src/ipyvizzu/animation.py METASEP """A module for working with chart animations.""" import abc import json from os import PathLike from typing import List, Optional, Tuple, Union import warnings import jsonschema # type: ignore from ipyvizzu.data.converters.defaults import NAN_DIMENSION, NAN_MEASURE from ipyvizzu.data.converters.df.defaults import MAX_ROWS from ipyvizzu.data.converters.numpy.converter import NumpyArrayConverter from ipyvizzu.data.converters.pandas.converter import PandasDataFrameConverter from ipyvizzu.data.converters.spark.converter import SparkDataFrameConverter from ipyvizzu.data.converters.numpy.type_alias import ColumnName, ColumnDtype from ipyvizzu.data.type_alias import ( DimensionValue, NestedMeasureValues, MeasureValue, Record, Series, SeriesValues, ) from ipyvizzu.json import RawJavaScript, RawJavaScriptEncoder from ipyvizzu.schema import DATA_SCHEMA class AbstractAnimation: """ An abstract class for representing animation objects that have `dump` and `build` methods. """ def dump(self) -> str: """ A method for converting the built dictionary into string. Returns: An str that has been json dumped with [RawJavaScriptEncoder][ipyvizzu.json.RawJavaScriptEncoder] from a dictionary. """ return json.dumps(self.build(), cls=RawJavaScriptEncoder) @abc.abstractmethod def build(self) -> dict: """ An abstract method for returning a dictionary with values that can be converted into json string. Returns: A dictionary that stored in the animation object. """ class PlainAnimation(dict, AbstractAnimation): """ A class for representing plain animation. It can build any dictionary. """ def build(self) -> dict: """ A method for returning the plain animation dictionary. Returns: A dictionary that stored in the plain animation object. """ return self class Data(dict, AbstractAnimation): """ A class for representing data animation. It can build data option of the chart. 
""" @classmethod def filter(cls, filter_expr: Optional[str] = None) -> "Data": """ A class method for creating a [Data][ipyvizzu.animation.Data] class instance with a data filter. Args: filter_expr: The JavaScript data filter expression. Returns: (Data): A data animation instance that contains a data filter. Example: Create a [Data][ipyvizzu.animation.Data] class with a data filter: filter = Data.filter("record['Genres'] == 'Pop'") """ data = cls() data.set_filter(filter_expr) return data def set_filter(self, filter_expr: Optional[str] = None) -> None: """ A method for adding a filter to an existing [Data][ipyvizzu.animation.Data] class instance. Args: filter_expr: The JavaScript data filter expression. Example: Add a data filter to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.set_filter("record['Genres'] == 'Pop'") """ filter_expr_raw_js = ( RawJavaScript(f"record => {{ return ({' '.join(filter_expr.split())}) }}") if filter_expr is not None else filter_expr ) self.update({"filter": filter_expr_raw_js}) @classmethod def from_json(cls, filename: Union[str, bytes, PathLike]) -> "Data": """ A method for returning a [Data][ipyvizzu.animation.Data] class instance which has been created from a json file. Args: filename: The path of the data source json file. Returns: (Data): A data animation instance that has been created from a json file. """ with open(filename, "r", encoding="utf8") as file_desc: return cls(json.load(file_desc)) def add_record(self, record: Record) -> None: """ A method for adding a record to an existing [Data][ipyvizzu.animation.Data] class instance. Args: record: A list that contains data values. Example: Adding a record to a [Data][ipyvizzu.animation.Data] class instance: data = Data() record = ["Pop", "Hard", 114] data.add_record(record) """ self._add_value("records", record) def add_records(self, records: List[Record]) -> None: """ A method for adding records to an existing [Data][ipyvizzu.animation.Data] class instance. Args: records: A list that contains data records. Example: Adding records to a [Data][ipyvizzu.animation.Data] class instance: data = Data() records = [ ["Pop", "Hard", 114], ["Rock", "Hard", 96], ["Pop", "Experimental", 127], ["Rock", "Experimental", 83], ] data.add_records(records) """ list(map(self.add_record, records)) def add_series( self, name: str, values: Optional[SeriesValues] = None, **kwargs ) -> None: """ A method for adding a series to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the series. values: The data values of the series. **kwargs (Optional): Arbitrary keyword arguments. For example infer type can be set with the `type` keywod argument. Example: Adding a series without values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series("Genres") Adding a series without values and with infer type to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series("Kinds", type="dimension") Adding a series with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_series( "Popularity", [114, 96, 127, 83] ) """ self._add_named_value("series", name, values, **kwargs) def add_series_list(self, series: List[Series]) -> None: """ A method for adding list of series to an existing [Data][ipyvizzu.animation.Data] class instance. Args: series: List of series. 
""" if series: self.setdefault("series", []).extend(series) def add_dimension( self, name: str, values: Optional[List[DimensionValue]] = None, **kwargs ) -> None: """ A method for adding a dimension to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the dimension. values: The data values of the dimension. **kwargs (Optional): Arbitrary keyword arguments. Example: Adding a dimension with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_dimension("Genres", ["Pop", "Rock"]) """ self._add_named_value("dimensions", name, values, **kwargs) def add_measure( self, name: str, values: Optional[NestedMeasureValues] = None, **kwargs ) -> None: """ A method for adding a measure to an existing [Data][ipyvizzu.animation.Data] class instance. Args: name: The name of the measure. values: The data values of the measure. **kwargs (Optional): Arbitrary keyword arguments. Example: Adding a measure with values to a [Data][ipyvizzu.animation.Data] class instance: data = Data() data.add_measure( "Popularity", [ [114, 96], [127, 83], ], ) """ self._add_named_value("measures", name, values, **kwargs) def add_df( self, df: Optional[Union["pandas.DataFrame", "pandas.Series"]], # type: ignore default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, max_rows: int = MAX_ROWS, include_index: Optional[str] = None, ) -> None: """ Add a `pandas` `DataFrame` or `Series` to an existing [Data][ipyvizzu.animation.Data] class instance. Args: df: The `pandas` `DataFrame` or `Series` to add. default_measure_value: The default measure value to fill empty values. Defaults to 0. default_dimension_value: The default dimension value to fill empty values. Defaults to an empty string. max_rows: The maximum number of rows to include in the converted series list. If the `df` contains more rows, a random sample of the given number of rows will be taken. include_index: Add the data frame's index as a column with the given name. Defaults to `None`. Example: Adding a data frame to a [Data][ipyvizzu.animation.Data] class instance: df = pd.DataFrame( { "Genres": ["Pop", "Rock", "Pop", "Rock"], "Kinds": ["Hard", "Hard", "Experimental", "Experimental"], "Popularity": [114, 96, 127, 83], } ) data = Data() data.add_df(df) """ # pylint: disable=too-many-arguments if not isinstance(df, type(None)): converter = PandasDataFrameConverter( df, default_measure_value, default_dimension_value, max_rows, include_index, ) series_list = converter.get_series_list() self.add_series_list(series_list) def add_data_frame( self, data_frame: Optional[Union["pandas.DataFrame", "pandas.Series"]], # type: ignore default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, ) -> None: """ [Deprecated] This function is deprecated and will be removed in future versions. Use [add_df][ipyvizzu.animation.Data.add_df] function instead. Add a `pandas` `DataFrame` or `Series` to an existing [Data][ipyvizzu.animation.Data] class instance. Args: data_frame: The `pandas` `DataFrame` or `Series` to add. default_measure_value: The default measure value to fill empty values. Defaults to 0. default_dimension_value: The default dimension value to fill empty values. Defaults to an empty string. """ # pylint: disable=line-too-long reference = "https://ipyvizzu.vizzuhq.com/0.16/reference/ipyvizzu/animation/#ipyvizzu.animation.Data.add_df" warnings.warn( f"'add_data_frame' is deprecated and will be removed in future versions. 
Use 'add_df' instead - see {reference}", DeprecationWarning, stacklevel=2, ) self.add_df(data_frame, default_measure_value, default_dimension_value) def add_df_index( self, df: Optional[Union["pandas.DataFrame", "pandas.Series"]], # type: ignore column_name: str = "Index", ) -> None: """ Add the index of a `pandas` `DataFrame` as a series to an existing [Data][ipyvizzu.animation.Data] class instance. Args: df: The `pandas` `DataFrame` or `Series` from which to extract the index. column_name: Name for the index column to add as a series. Example: Adding a data frame's index to a [Data][ipyvizzu.animation.Data] class instance: df = pd.DataFrame( {"Popularity": [114, 96]}, index=["x", "y"] ) data = Data() data.add_df_index(df, "DataFrameIndex") data.add_df(df) """ if not isinstance(df, type(None)): converter = PandasDataFrameConverter(df, include_index=column_name) series_list = converter.get_series_from_index() self.add_series_list(series_list) def add_data_frame_index( self, data_frame: Optional[Union["pandas.DataFrame", "pandas.Series"]], # type: ignore name: str, ) -> None: """ [Deprecated] This function is deprecated and will be removed in future versions. Use [add_df_index][ipyvizzu.animation.Data.add_df_index] function instead. Add the index of a `pandas` `DataFrame` as a series to an existing [Data][ipyvizzu.animation.Data] class instance. Args: data_frame: The `pandas` `DataFrame` or `Series` from which to extract the index. name: The name of the index series. """ # pylint: disable=line-too-long reference = "https://ipyvizzu.vizzuhq.com/0.16/reference/ipyvizzu/animation/#ipyvizzu.animation.Data.add_df_index" warnings.warn( f"'add_data_frame_index' is deprecated and will be removed in future versions. Use 'add_df_index' instead - see {reference}", DeprecationWarning, stacklevel=2, ) self.add_df_index(data_frame, name) def add_np_array( self, np_array: Optional["numpy.array"], # type: ignore column_name: Optional[ColumnName] = None, column_dtype: Optional[ColumnDtype] = None, default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, ) -> None: """ Add a `numpy` `array` to an existing [Data][ipyvizzu.animation.Data] class instance. Args: np_array: The `numpy` `array` to add. column_name: The name of a column. By default, uses column indices. Can be set with an Index:Name pair or, for single-dimensional arrays, with just the Name. column_dtype: The dtype of a column. By default, uses the np_array's dtype. Can be set with an Index:DType pair or, for single-dimensional arrays, with just the DType. default_measure_value: Default value to use for missing measure values. Defaults to 0. default_dimension_value: Default value to use for missing dimension values. Defaults to an empty string. Example: Adding a data frame to a [Data][ipyvizzu.animation.Data] class instance: np_array = np.zeros((3, 4)) data = Data() data.add_np_array(np_array) """ # pylint: disable=too-many-arguments if not isinstance(np_array, type(None)): converter = NumpyArrayConverter( np_array, column_name, column_dtype, default_measure_value, default_dimension_value, ) series_list = converter.get_series_list() self.add_series_list(series_list) def add_spark_df( self, df: Optional["pyspark.sql.DataFrame"], # type: ignore default_measure_value: MeasureValue = NAN_MEASURE, default_dimension_value: DimensionValue = NAN_DIMENSION, max_rows: int = MAX_ROWS, ) -> None: """ Add a `pyspark` `DataFrame` to an existing [Data][ipyvizzu.animation.Data] class instance. 
Args: df: The `pyspark` `DataFrame` to add. default_measure_value: The default measure value to fill empty values. Defaults to 0. default_dimension_value: The default dimension value to fill empty values. Defaults to an empty string. max_rows: The maximum number of rows to include in the converted series list. If the `df` contains more rows, a random sample of the given number of rows will be taken. """ if not isinstance(df, type(None)): converter = SparkDataFrameConverter( df, default_measure_value, default_dimension_value, max_rows ) series_list = converter.get_series_list() self.add_series_list(series_list) def _add_named_value( self, dest: str, name: str, values: Optional[ Union[ SeriesValues, NestedMeasureValues, ] ] = None, **kwargs, ) -> None: value = {"name": name, **kwargs} if values is not None: value["values"] = values self._add_value(dest, value) def _add_value(self, dest: str, value: Union[dict, list]) -> None: self.setdefault(dest, []).append(value) def build(self) -> dict: """ A method for validating and returning the data animation dictionary. Returns: A dictionary that stored in the data animation object. It contains a `data` key whose value is the stored animation. """ jsonschema.validate(self, DATA_SCHEMA) return {"data": self} class ConfigAttr(type): """ A metaclass class for the [Config][ipyvizzu.animation.Config] class. Returns a [Config][ipyvizzu.animation.Config] class with a chart preset if the `__getattr__` method called. For information on all available chart presets see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/modules/Presets/#interfaces). """ @classmethod def __getattr__(mcs, name): config_attr = mcs("ConfigAttr", (object,), {"name": name}) return config_attr._get_preset # pylint: disable=no-member def _get_preset(cls, preset): config = Config(RawJavaScript(f"lib.presets.{cls.name}({preset})")) return config class Config(AbstractAnimation, metaclass=ConfigAttr): """ A class for representing config animation. It can build config option of the chart. """ def __init__(self, data: Optional[Union[dict, RawJavaScript]]): """ Config constructor. Args: data: A config animation dictionary. For information on all available config parameters see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/interfaces/Config.Chart/#properties). """ # pylint: disable=line-too-long self._data = data def build(self) -> dict: """ A method for returning the config animation dictionary. Returns: A dictionary that stored in the config animation object. It contains a `config` key whose value is the stored animation. """ return {"config": self._data} class Style(AbstractAnimation): """ A class for representing style animation. It can build style option of the chart. """ def __init__(self, data: Optional[dict]): """ Style constructor. Args: data: A style animation dictionary. For information on all available style parameters see the [Style][styling-properties] chapter or the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/interfaces/Styles.Chart/#properties). """ # pylint: disable=line-too-long self._data = data def build(self) -> dict: """ A method for returning the style animation dictionary. Returns: A dictionary that stored in the style animation object. It contains a `style` key whose value is the stored animation. """ return {"style": self._data} class Keyframe(AbstractAnimation): """ A class for representing keyframe animation. It can build keyframe of the chart. 
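    Example:
        A minimal sketch of playing two chart states as a single animation,
        assuming a `chart` instance and a `data` animation already exist:

            chart.animate(
                Keyframe(data, Config({"title": "First state"})),
                Keyframe(Config({"title": "Second state"}), duration=1),
            )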
""" def __init__( self, *animations: AbstractAnimation, **options: Optional[Union[str, int, float, dict]], ): """ Keyframe constructor. Args: *animations: List of AbstractAnimation inherited objects such as [Data][ipyvizzu.animation.Data], [Config][ipyvizzu.animation.Config] and [Style][ipyvizzu.animation.Style]. **options: Dictionary of animation options for example `duration=1`. For information on all available animation options see the [Vizzu Code reference](https://lib.vizzuhq.com/latest/reference/interfaces/Anim.Options/#properties). Raises: ValueError: If `animations` is not set. ValueError: If initialized with a `Keyframe`. """ # pylint: disable=line-too-long if not animations: raise ValueError("No animation was set.") if [animation for animation in animations if isinstance(animation, Keyframe)]: raise ValueError("A Keyframe cannot contain a Keyframe.") self._keyframe = {} self._keyframe["target"] = AnimationMerger.merge_animations(animations).build() if options: self._keyframe["options"] = options def build(self) -> dict: """ A method for returning the keyframe animation dictionary. Returns: A dictionary that stored in the keyframe animation object. It contains a `target` key whose value is the stored animation and an optional `options` key whose value is the stored animation options. """ return self._keyframe class Snapshot(AbstractAnimation): """ A class for representing a stored chart state. It can build the snapshot id of the chart. """ def __init__(self, snapshot_id: str): """ Snapshot constructor. Args: snapshot_id: A snapshot id. """ self._snapshot_id = snapshot_id def build(self) -> str: # type: ignore """ A method for returning the snapshot id str. Returns: An str snapshot id that stored in the snapshot animation object. """ return self._snapshot_id class Animation(Snapshot): """ A class for representing a stored animation. It can build the snapshot id of the animation. """ class AnimationMerger(AbstractAnimation): """A class for merging different types of animations.""" def __init__(self) -> None: """AnimationMerger constructor.""" self._dict: dict = {} self._list: list = [] @classmethod def merge_animations( cls, animations: Tuple[AbstractAnimation, ...] ) -> AbstractAnimation: """ A class method for merging animations. Args: animations: List of `AbstractAnimation` inherited objects. Returns: An `AnimationMerger` class with the merged animations. """ if len(animations) == 1 and not isinstance(animations[0], Keyframe): return animations[0] merger = cls() for animation in animations: merger.merge(animation) return merger def merge(self, animation: AbstractAnimation) -> None: """ A method for merging an animation with the previously merged animations. Args: animation: An animation to be merged with with previously merged animations. Raises: ValueError: If the type of an animation is already merged. ValueError: If `Keyframe` is merged with different type of animation. 
""" if isinstance(animation, Keyframe): if self._dict: raise ValueError("Keyframe cannot be merged with other animations.") data = animation.build() self._list.append(data) else: if self._list: raise ValueError("Keyframe cannot be merged with other animations.") data = self._validate(animation) self._dict.update(data) def _validate(self, animation: AbstractAnimation) -> dict: if isinstance(animation, Snapshot): raise ValueError("Snapshot cannot be merged with other animations.") data = animation.build() common_keys = set(data).intersection(self._dict) if common_keys: raise ValueError(f"{common_keys} is already merged.") return data def build(self) -> Union[dict, list]: # type: ignore """ A method for returning a merged list of `Keyframes` or a merged dictionary from different types of animations. Returns: A merged list of [Keyframes][ipyvizzu.animation.Keyframe] or a merged dictionary from [Data][ipyvizzu.animation.Data], [Config][ipyvizzu.animation.Config] and [Style][ipyvizzu.animation.Style] animations. """ if self._dict: return self._dict return self._list src/ipyvizzu/__version__.py METASEP """A module for storing version number.""" __version__ = "0.15.0" src/ipyvizzu/__init__.py METASEP """ Build animated charts in `Jupyter Notebook` and similar environments with a simple `Python` syntax. `ipyvizzu` package consists of the following main modules: * [Chart][ipyvizzu.chart] * [Animation][ipyvizzu.animation] * [Animation Control][ipyvizzu.animationcontrol] * [Method][ipyvizzu.method] * [Event][ipyvizzu.event] * [Json][ipyvizzu.json] * [Template][ipyvizzu.template] * [Schema][ipyvizzu.schema] * [Data][ipyvizzu.data] * [Integrations][ipyvizzu.integrations] `ipyvizzu` package imports the following objects in `__init__.py`: * [Chart][ipyvizzu.chart.Chart] * [Data][ipyvizzu.animation.Data] * [Config][ipyvizzu.animation.Config] * [Style][ipyvizzu.animation.Style] * [Keyframe][ipyvizzu.animation.Keyframe] * [Snapshot][ipyvizzu.animation.Snapshot] * [Animation][ipyvizzu.animation.Animation] * [AbstractAnimation][ipyvizzu.animation.AbstractAnimation] * [PlainAnimation][ipyvizzu.animation.PlainAnimation] * [AnimationMerger][ipyvizzu.animation.AnimationMerger] * [AnimationControl][ipyvizzu.animationcontrol.AnimationControl] * [InferType][ipyvizzu.data.infer_type.InferType] * [NumpyArrayConverter][ipyvizzu.data.converters.numpy.converter.NumpyArrayConverter] * [PandasDataFrameConverter][ipyvizzu.data.converters.pandas.converter.PandasDataFrameConverter] * [Animate][ipyvizzu.method.Animate] * [Feature][ipyvizzu.method.Feature] * [Store][ipyvizzu.method.Store] * [EventOn][ipyvizzu.method.EventOn] * [EventOff][ipyvizzu.method.EventOff] * [Log][ipyvizzu.method.Log] * [Method][ipyvizzu.method.Method] * [EventHandler][ipyvizzu.event.EventHandler] * [RawJavaScript][ipyvizzu.json.RawJavaScript] * [RawJavaScriptEncoder][ipyvizzu.json.RawJavaScriptEncoder] * [ChartProperty][ipyvizzu.template.ChartProperty] * [DisplayTarget][ipyvizzu.template.DisplayTarget] * [DisplayTemplate][ipyvizzu.template.DisplayTemplate] """ from .chart import Chart from .animation import ( AbstractAnimation, PlainAnimation, Data, Config, Style, Keyframe, Snapshot, Animation, AnimationMerger, ) from .animationcontrol import AnimationControl from .data.converters.numpy.converter import NumpyArrayConverter from .data.converters.pandas.converter import PandasDataFrameConverter from .data.infer_type import InferType from .method import Method, Animate, Feature, Store, EventOn, EventOff, Log from .json import RawJavaScript, 
RawJavaScriptEncoder from .template import ChartProperty, DisplayTarget, DisplayTemplate from .event import EventHandler from .__version__ import __version__ __all__ = [ "Chart", "Data", "Config", "Style", "Keyframe", "Snapshot", "Animation", "AbstractAnimation", "PlainAnimation", "AnimationMerger", "Animate", "Feature", "Store", "EventOn", "EventOff", "Log", "AnimationControl", "NumpyArrayConverter", "PandasDataFrameConverter", "InferType", "Method", "EventHandler", "RawJavaScript", "RawJavaScriptEncoder", "ChartProperty", "DisplayTarget", "DisplayTemplate", ] tests/utils/normalizer.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import re import sys from typing import Optional from unittest.mock import MagicMock class Normalizer: def __init__(self) -> None: self.id1_pattern = re.compile(r"'[a-f0-9]{7}'", flags=re.MULTILINE) self.id2_pattern = re.compile(r"\\'[a-f0-9]{7}\\'", flags=re.MULTILINE) self.id3_pattern = re.compile(r"\"[a-f0-9]{7}\"", flags=re.MULTILINE) def normalize_id(self, output: str) -> str: normalized_output = output normalized_output = self.id1_pattern.sub("id", normalized_output) normalized_output = self.id2_pattern.sub("id", normalized_output) normalized_output = self.id3_pattern.sub("id", normalized_output) return normalized_output def normalize_output( self, output: MagicMock, start_index: int = 0, end_index: Optional[int] = None ) -> str: output_items = [] if not end_index: end_index = len(output.call_args_list) for block in output.call_args_list[start_index:end_index]: if sys.version_info >= (3, 8): args = block.args else: # TODO: remove once support for Python 3.7 is dropped args, _ = block output_items.append(args[0]) return self.normalize_id("\n".join(output_items)).strip() tests/utils/import_error.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from contextlib import contextmanager import os class RaiseImportError: @classmethod @contextmanager def module_name(cls, module_name: str): original_value = os.environ.get("RAISE_IMPORT_ERROR", None) os.environ["RAISE_IMPORT_ERROR"] = module_name try: yield finally: if original_value is None: os.environ.pop("RAISE_IMPORT_ERROR", None) else: os.environ["RAISE_IMPORT_ERROR"] = original_value @staticmethod def overwrite_imports() -> None: builtins = globals()["__builtins__"] def overwrite_import(original_import_builtin): def import_replacement(name, *args, **kwargs): module_name = os.environ.get("RAISE_IMPORT_ERROR", None) if name == module_name: raise ImportError(f"{module_name} is not available") return original_import_builtin(name, *args, **kwargs) return import_replacement builtins["__import__"] = overwrite_import(builtins["__import__"]) tests/utils/__init__.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from tests.utils.import_error import RaiseImportError RaiseImportError.overwrite_imports() tests/test_method.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import unittest from ipyvizzu import ( Animate, AnimationMerger, ChartProperty, Config, EventHandler, EventOff, EventOn, Feature, Log, Method, Snapshot, Store, Style, ) class TestMethod(unittest.TestCase): def test_method(self) -> None: method = Method() with self.assertRaises(AttributeError): method.dump() def test_animate_with_anim_without_option(self) -> None: animation = Snapshot("abc1234") method = Animate(chart_target=animation) 
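        # With no chart_anim_opts given, the Snapshot target is expected to
        # dump as its quoted id and the options as the string "undefined".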
self.assertEqual( { "chart_target": '"abc1234"', "chart_anim_opts": "undefined", }, method.dump(), ) def test_animate_with_animmerger_without_option(self) -> None: config = Config({"title": "My first chart"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) animation_merger = AnimationMerger() animation_merger.merge(config) animation_merger.merge(style) method = Animate(chart_target=animation_merger) self.assertEqual( { "chart_target": '{"config": ' + '{"title": "My first chart"}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}}', "chart_anim_opts": "undefined", }, method.dump(), ) def test_animate_with_anim_with_option(self) -> None: animation = Snapshot("abc1234") option = {"duration": 1, "easing": "linear"} method = Animate(chart_target=animation, chart_anim_opts=option) self.assertEqual( { "chart_target": '"abc1234"', "chart_anim_opts": '{"duration": 1, "easing": "linear"}', }, method.dump(), ) def test_animate_with_animmerger_with_option(self) -> None: config = Config({"title": "My first chart"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) animation_merger = AnimationMerger() animation_merger.merge(config) animation_merger.merge(style) option = {"duration": 1, "easing": "linear"} method = Animate(chart_target=animation_merger, chart_anim_opts=option) self.assertEqual( { "chart_target": '{"config": ' + '{"title": "My first chart"}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}}', "chart_anim_opts": '{"duration": 1, "easing": "linear"}', }, method.dump(), ) def test_feature(self) -> None: method = Feature(name="tooltip", enabled=True) self.assertEqual({"name": "tooltip", "enabled": "true"}, method.dump()) def test_store(self) -> None: method = Store(snapshot_id="abc1234") self.assertEqual({"id": "abc1234"}, method.dump()) def test_event_on(self) -> None: event_handler = EventHandler( event="click", handler="alert(JSON.stringify(event.data));" ) method = EventOn(event_handler=event_handler) method_dump = method.dump() self.assertEqual( { "id": method_dump["id"], "event": "click", "handler": "alert(JSON.stringify(event.data));", }, method_dump, ) def test_event_off(self) -> None: event_handler = EventHandler( event="click", handler="alert(JSON.stringify(event.data));" ) method = EventOff(event_handler=event_handler) method_dump = method.dump() self.assertEqual( { "id": method_dump["id"], "event": "click", }, method_dump, ) def test_log(self) -> None: method = Log(chart_property=ChartProperty.CONFIG) self.assertEqual( { "chart_property": "config", }, method.dump(), ) tests/test_json.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import json import unittest from ipyvizzu import RawJavaScriptEncoder, RawJavaScript class TestRawJavaScriptEncoder(unittest.TestCase): def test_encoder_with_rawjavascript(self) -> None: raw_javascript = RawJavaScript("null") self.assertEqual( json.dumps({"test": raw_javascript}, cls=RawJavaScriptEncoder), '{"test": null}', ) def test_encoder_with_not_rawjavascript(self) -> None: class NotRawJavaScript: # pylint: disable=too-few-public-methods def __init__(self) -> None: pass not_raw_javascript = NotRawJavaScript() with self.assertRaises(TypeError): json.dumps({"test": not_raw_javascript}, cls=RawJavaScriptEncoder) tests/test_fugue.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring from contextlib import redirect_stdout import io import pathlib import sys import unittest import pandas as pd from 
tests.utils.normalizer import Normalizer if sys.version_info >= (3, 7): import fugue.api as fa import ipyvizzu.integrations.fugue # register the extension # pylint: disable=unused-import else: # TODO: remove once support for Python 3.6 is dropped pass class TestFugue(unittest.TestCase): # TODO: remove decorator once support for Python 3.6 is dropped @unittest.skipUnless(sys.version_info >= (3, 7), "at least Python 3.7 is required") def test_fugue_extension_preset(self) -> None: ref = pathlib.Path(__file__).parent / "assets" / "fugue_preset.txt" with open(ref, "r", encoding="utf8") as f_ref: ref_content = f_ref.read() df = pd.DataFrame({"a": list("abcde"), "b": range(5)}) stdout = io.StringIO() with redirect_stdout(stdout): fa.fugue_sql_flow( """ SELECT * FROM df WHERE b<5 OUTPUT USING vizzu:bar(x="a",y="b") """, df=df, ).run() self.assertEqual( Normalizer().normalize_id("\n".join(stdout.getvalue().split("\n")[1:])), ref_content, ) # TODO: remove decorator once support for Python 3.6 is dropped @unittest.skipUnless(sys.version_info >= (3, 7), "at least Python 3.7 is required") def test_fugue_extension_timeline(self) -> None: ref = pathlib.Path(__file__).parent / "assets" / "fugue_timeline.txt" with open(ref, "r", encoding="utf8") as f_ref: ref_content = f_ref.read() df = pd.DataFrame({"a": list("abcde"), "b": range(5), "c": [1, 1, 2, 2, 3]}) stdout = io.StringIO() with redirect_stdout(stdout): fa.fugue_sql_flow( """ SELECT * FROM df WHERE b<5 OUTPUT USING vizzu:timeline_bar( by="c", config={"x":"b","y":"a",title="x %s"}, duration=0.3 ) """, df=df, ).run() self.assertEqual( Normalizer().normalize_id("\n".join(stdout.getvalue().split("\n")[1:])), ref_content, ) tests/test_chart.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import abc from typing import Callable import unittest import unittest.mock from ipyvizzu import ( Animation, Chart, ChartProperty, Config, Data, EventHandler, Snapshot, Style, ) from tests.utils.normalizer import Normalizer class TestChart(unittest.TestCase, abc.ABC): normalizer: Normalizer @classmethod def setUpClass(cls) -> None: cls.normalizer = Normalizer() def setUp(self) -> None: self.patch = unittest.mock.patch(self.mock) self.trash = self.patch.start() self.chart = Chart() self.chart.initializing() def tearDown(self) -> None: self.patch.stop() @property def mock(self) -> str: return "ipyvizzu.chart.display_javascript" class TestChartInit(TestChart): def test_init(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart() chart.initializing() self.assertEqual( self.normalizer.normalize_output(output, 2), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');", ) def test_init_vizzu(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart( vizzu="https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js" ) chart.initializing() self.assertEqual( self.normalizer.normalize_output(output, 2), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');", ) def test_init_div(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart(width="400px", height="240px") chart.initializing() self.assertEqual( self.normalizer.normalize_output(output, 2), "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + 
"'400px', '240px');", ) def test_init_display_invalid(self) -> None: with self.assertRaises(ValueError): Chart(display="invalid") def test_init_display_begin(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart(display="begin") chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output, 3), "window.ipyvizzu.animate(element, id, id, 'begin', false, " + "lib => { return id }, " + "undefined);", ) def test_init_display_actual(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart(display="actual") chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output, 3), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + "lib => { return id }, " + "undefined);", ) def test_init_display_end(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart(display="end") chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output, 3), "window.ipyvizzu.animate(element, id, id, 'end', false, " + "lib => { return id }, " + "undefined);", ) def test_manual_init(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.initializing() self.chart.initializing() self.chart.initializing() self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + "lib => { return id }, " + "undefined);", ) def test_init_register_events(self) -> None: class IPyEvents: # pylint: disable=too-few-public-methods @staticmethod def register(event: str, function: Callable[[], None]) -> None: # pylint: disable=unused-argument function() class IPy: # pylint: disable=too-few-public-methods events = IPyEvents get_ipython_mock = "ipyvizzu.chart.get_ipython" with unittest.mock.patch(get_ipython_mock, return_value=IPy()): with unittest.mock.patch(self.mock) as output: chart = Chart() chart.initializing() self.assertEqual( self.normalizer.normalize_output(output, 2, 3), "if (window.IpyVizzu) { window.IpyVizzu.clearInhibitScroll(element); }", ) class TestChartMethods(TestChart): def test_animate_chart_target_has_to_be_passed(self) -> None: with self.assertRaises(ValueError): self.chart.animate() def test_animate_chart_target_has_to_be_passed_even_if_chart_anim_opts_passed( self, ) -> None: with self.assertRaises(ValueError): self.chart.animate(duration="500ms") def test_animate_one_chart_target(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) self.chart.animate(data) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + "undefined);", ) def test_animate_one_chart_target_with_chart_anim_opts(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) self.chart.animate(data, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + '{"duration": "500ms"});', ) def test_animate_snapshot_chart_target(self) -> None: with unittest.mock.patch(self.mock) as output: snapshot = Snapshot("abc1234") self.chart.animate(snapshot) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + "lib => { return id }, " + 
"undefined);", ) def test_animate_snapshot_chart_target_with_chart_anim_opts(self) -> None: with unittest.mock.patch(self.mock) as output: snapshot = Snapshot("abc1234") self.chart.animate(snapshot, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + "lib => { return id }, " + '{"duration": "500ms"});', ) def test_animate_stored_animation_chart_target(self) -> None: with unittest.mock.patch(self.mock) as output: animation = Animation("abc1234") self.chart.animate(animation, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + "lib => { return id }, " + '{"duration": "500ms"});', ) def test_animate_more_chart_target(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config, style) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + "undefined);", ) def test_animate_more_chart_target_with_chart_anim_opts(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config, style, duration="500ms") self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + '{"duration": "500ms"});', ) def test_animate_more_chart_target_with_conflict(self) -> None: data = Data() data.add_record(["Rock", "Hard", 96]) config1 = Config({"channels": {"label": {"attach": ["Popularity"]}}}) config2 = Config({"title": "Test"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) with self.assertRaises(ValueError): self.chart.animate(data, config1, style, config2) def test_animate_more_chart_target_with_snapshot(self) -> None: data = Data() data.add_record(["Rock", "Hard", 96]) config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) snapshot = Snapshot("abc1234") with self.assertRaises(ValueError): self.chart.animate(data, config, style, snapshot) def test_animate_more_calls(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) config1 = Config({"channels": {"label": {"attach": ["Popularity"]}}}) config2 = Config({"title": "Test"}) style = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.chart.animate(data, config1, style) self.chart.animate(config2) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}, ' + '"config": {"channels": {"label": {"attach": ["Popularity"]}}}, ' + '"style": {"title": {"backgroundColor": "#A0A0A0"}}} }, ' + "undefined);\n" + 
"window.ipyvizzu.animate(element, id, id, 'actual', false, " + 'lib => { return {"config": {"title": "Test"}} }, ' + "undefined);", ) def test_animate_with_not_default_scroll_into_view(self) -> None: with unittest.mock.patch(self.mock) as output: data = Data() data.add_record(["Rock", "Hard", 96]) scroll_into_view = not self.chart.scroll_into_view self.chart.scroll_into_view = scroll_into_view self.chart.animate(data) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(" + f"element, id, id, 'actual', {str(scroll_into_view).lower()}, " + 'lib => { return {"data": {"records": [["Rock", "Hard", 96]]}} }, ' + "undefined);", ) def test_feature(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.feature("tooltip", True) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.feature(element, id, 'tooltip', true);", ) def test_store(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.store() self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.store(element, id, id);", ) class TestChartEvents(TestChart): def test_on(self) -> None: with unittest.mock.patch(self.mock) as output: handler_method = """event.renderingContext.fillStyle = (event.data.text === 'Jazz') ? 'red' : 'gray';""" self.chart.on("plot-axis-label-draw", handler_method) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.setEvent(" + "element, id, id, 'plot-axis-label-draw', " + "event => " + "{ event.renderingContext.fillStyle = " + "(event.data.text === 'Jazz') ? 'red' : 'gray'; });", ) def test_off(self) -> None: with unittest.mock.patch(self.mock) as output: handler_method = "alert(JSON.stringify(event.data));" handler = EventHandler("click", handler_method) self.chart.off(handler) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.clearEvent(element, id, id, 'click');", ) class TestChartLogs(TestChart): def test_log_config(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.log(ChartProperty.CONFIG) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.log(element, id, 'config');", ) def test_log_style(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.log(ChartProperty.STYLE) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.log(element, id, 'style');", ) def test_log_invalid(self) -> None: with self.assertRaises(AttributeError): self.chart.log(ChartProperty.INVALID) # type: ignore # pylint: disable=no-member class TestChartAnalytics(TestChart): def test_analytics_default_value(self) -> None: chart = Chart() self.assertEqual( chart.analytics, True, ) def test_change_analytics_before_initializing(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart() chart.analytics = False chart.initializing() self.assertEqual( self.normalizer.normalize_output(output, 1), "if (window.IpyVizzu) window.IpyVizzu.changeAnalyticsTo(false);" + "\n" "window.ipyvizzu.createChart(" + "element, " + "id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');", ) def test_change_analytics_after_initializing(self) -> None: with unittest.mock.patch(self.mock) as output: chart = Chart() chart.initializing() chart.analytics = False chart.analytics = True self.assertEqual( self.normalizer.normalize_output(output, 1), "if (window.IpyVizzu) window.IpyVizzu.changeAnalyticsTo(true);" + "\n" + "window.ipyvizzu.createChart(" + "element, " + 
"id, " + "'https://cdn.jsdelivr.net/npm/[email protected]/dist/vizzu.min.js', " + "'800px', '480px');" + "\n" + "if (window.IpyVizzu) window.IpyVizzu.changeAnalyticsTo(false);" + "\n" + "if (window.IpyVizzu) window.IpyVizzu.changeAnalyticsTo(true);", ) class TestChartDisplay(TestChart): def test_repr_html_if_display_is_not_manual(self) -> None: self.chart.animate(Snapshot("abc1234")) with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_show_if_display_is_not_manual(self) -> None: self.chart.animate(Snapshot("abc1234")) with self.assertRaises(AssertionError): self.chart.show() def test_repr_html(self) -> None: display_mock = "ipyvizzu.Chart._display" with unittest.mock.patch(display_mock) as output: chart = Chart(display="manual") chart.animate(Snapshot("abc1234")) self.assertEqual( chart._showed, # pylint: disable=protected-access False, ) chart._repr_html_() # pylint: disable=protected-access self.assertEqual( chart._showed, # pylint: disable=protected-access True, ) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'manual', false, " + "lib => { return id }, " + "undefined);", ) def test_show(self) -> None: self.chart = Chart(display="manual") display_mock = "ipyvizzu.Chart._display" with unittest.mock.patch(display_mock) as output: self.chart.animate(Snapshot("abc1234")) self.assertEqual( self.chart._showed, # pylint: disable=protected-access False, ) self.chart.show() self.assertEqual( self.chart._showed, # pylint: disable=protected-access True, ) self.assertEqual( self.normalizer.normalize_output(output), "window.ipyvizzu.animate(element, id, id, 'manual', false, " + "lib => { return id }, " + "undefined);", ) def test_repr_html_after_repr_html(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_repr_html_after_show(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart._repr_html_() # pylint: disable=protected-access def test_show_after_show(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.show() def test_show_after_repr_html(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.show() def test_animate_after_repr_html(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.animate(Snapshot("abc1234")) def test_animate_after_show(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.animate(Snapshot("abc1234")) def test_feature_after_repr_html(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.feature("tooltip", True) def test_feature_after_show(self) -> None: self.chart = Chart(display="manual") 
self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.feature("tooltip", True) def test_store_after_repr_html_(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart._repr_html_() # pylint: disable=protected-access with self.assertRaises(AssertionError): self.chart.store() def test_store_after_show(self) -> None: self.chart = Chart(display="manual") self.chart.animate(Snapshot("abc1234")) self.chart.show() with self.assertRaises(AssertionError): self.chart.store() tests/test_animationcontrol.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import unittest import unittest.mock from ipyvizzu import Style from tests.test_chart import TestChart class TestAnimationControl(TestChart): def test_must_be_called_after_animate(self) -> None: with self.assertRaises(AssertionError): self.chart.control.seek("50%") def test_cancel(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.cancel() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'cancel', id, id);", ) def test_pause(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.pause() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'pause', id, id);", ) def test_play(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.play() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'play', id, id);", ) def test_reverse(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.reverse() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'reverse', id, id);", ) def test_seek(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.seek("50%") self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'seek', id, id, '50%');", ) def test_stop(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.stop() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'stop', id, id);", ) def test_store(self) -> None: with unittest.mock.patch(self.mock) as output: self.chart.animate(Style(None)) self.chart.control.store() self.assertEqual( self.normalizer.normalize_output(output, 1), "window.ipyvizzu.control(element, 'store', id, id, id);", ) tests/test_animation.py METASEP # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring import json import pathlib from typing import List import unittest import jsonschema # type: ignore import numpy as np import pandas as pd from ipyvizzu.data.type_alias import Record from ipyvizzu import ( Animation, AnimationMerger, Config, Data, Keyframe, PlainAnimation, Snapshot, Style, ) from tests.utils.import_error import RaiseImportError class TestPlainAnimation(unittest.TestCase): def test_plainanimation(self) -> None: animation = PlainAnimation(geometry="circle") self.assertEqual({"geometry": "circle"}, animation.build()) class TestDataSchema(unittest.TestCase): def setUp(self) -> None: self.data = Data() def 
test_schema_dimension_only(self) -> None: self.data.add_dimension("Genres", ["Pop", "Rock"]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_measure_only(self) -> None: self.data.add_measure("Popularity", [[114, 96]]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_data_cube_and_series(self) -> None: self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_measure("Popularity", [[114, 96]]) self.data.add_series("Kinds", ["Hard"]) with self.assertRaises(jsonschema.ValidationError): self.data.build() def test_schema_data_cube_and_records(self) -> None: self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_measure("Popularity", [[114, 96]]) self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) with self.assertRaises(jsonschema.ValidationError): self.data.build() class TestDataClassmethods(unittest.TestCase): asset_dir: pathlib.Path @classmethod def setUpClass(cls) -> None: cls.asset_dir = pathlib.Path(__file__).parent / "assets" def test_filter(self) -> None: data = Data.filter("filter_expr") # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": record => { return (filter_expr) }}}', data.dump(), ) def test_filter_multiline(self) -> None: filter_expr = """ A && B || C """ data = Data.filter(filter_expr) # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": record => { return (A && B || C) }}}', data.dump(), ) def test_filter_can_be_none(self) -> None: data = Data.filter(None) # instead of build() test with dump() because contains raw js self.assertEqual( '{"data": {"filter": null}}', data.dump(), ) def test_from_json(self) -> None: data = Data.from_json(self.asset_dir / "data_from_json.json") self.assertEqual( { "data": { "dimensions": [ {"name": "Genres", "values": ["Rock", "Pop"]}, {"name": "Kinds", "values": ["Hard"]}, ], "measures": [{"name": "Popularity", "values": [[114, 96]]}], } }, data.build(), ) class TestData(unittest.TestCase): def setUp(self) -> None: self.data = Data() def test_set_filter(self) -> None: self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.data.set_filter("filter_expr") self.assertEqual( '{"data": {"records": ' + '[["Rock", "Hard", 96], ["Pop", "Hard", 114]], ' + '"filter": record => { return (filter_expr) }}}', self.data.dump(), ) def test_set_filter_can_be_none(self) -> None: self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.data.set_filter(None) self.assertEqual( '{"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]], "filter": null}}', self.data.dump(), ) def test_record_list(self) -> None: self.data.add_record(["Rock", "Hard", 96]) self.data.add_record(["Pop", "Hard", 114]) self.assertEqual( {"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]]}}, self.data.build(), ) def test_record_dict(self) -> None: self.data.add_record({"Genres": "Rock", "Kinds": "Hard", "Popularity": 96}) self.data.add_record({"Genres": "Pop", "Kinds": "Hard", "Popularity": 114}) self.assertEqual( { "data": { "records": [ {"Genres": "Rock", "Kinds": "Hard", "Popularity": 96}, {"Genres": "Pop", "Kinds": "Hard", "Popularity": 114}, ] } }, self.data.build(), ) def test_records(self) -> None: self.data.add_records([["Rock", "Hard", 96], ["Pop", "Hard", 114]]) self.assertEqual( {"data": {"records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]]}}, self.data.build(), ) def test_series(self) -> None: 
self.data.add_series("Genres", ["Rock", "Pop"], type="dimension") self.data.add_series("Kinds", ["Hard"]) self.data.add_series("Popularity", [96, 114], type="measure") self.assertEqual( { "data": { "series": [ { "name": "Genres", "type": "dimension", "values": ["Rock", "Pop"], }, {"name": "Kinds", "values": ["Hard"]}, {"name": "Popularity", "type": "measure", "values": [96, 114]}, ] } }, self.data.build(), ) def test_series_without_values(self) -> None: self.data.add_series("Genres", type="dimension") self.data.add_series("Kinds", type="dimension") self.data.add_series("Popularity", type="measure") records: List[Record] = [["Rock", "Hard", 96], ["Pop", "Hard", 114]] self.data.add_records(records) self.assertEqual( { "data": { "records": [["Rock", "Hard", 96], ["Pop", "Hard", 114]], "series": [ {"name": "Genres", "type": "dimension"}, {"name": "Kinds", "type": "dimension"}, {"name": "Popularity", "type": "measure"}, ], } }, self.data.build(), ) def test_data_cube(self) -> None: self.data.add_dimension("Genres", ["Pop", "Rock"]) self.data.add_dimension("Kinds", ["Hard"]) self.data.add_measure("Popularity", [[114, 96]]) self.assertEqual( { "data": { "dimensions": [ {"name": "Genres", "values": ["Pop", "Rock"]}, {"name": "Kinds", "values": ["Hard"]}, ], "measures": [ { "name": "Popularity", "values": [[114, 96]], } ], } }, self.data.build(), ) class TestDataAddDf(unittest.TestCase): asset_dir: pathlib.Path @classmethod def setUpClass(cls) -> None: cls.asset_dir = pathlib.Path(__file__).parent / "assets" def setUp(self) -> None: self.data = Data() def test_add_df_with_none(self) -> None: data = Data() data.add_df(None) self.assertEqual( {"data": {}}, data.build(), ) def test_add_df_with_df(self) -> None: with open(self.asset_dir / "df_in.json", encoding="utf8") as fh_in: fc_in = json.load(fh_in) with open(self.asset_dir / "df_out.json", encoding="utf8") as fh_out: fc_out = json.load(fh_out) df = pd.DataFrame(fc_in) df = df.astype({"PopularityAsDimension": str}) self.data.add_df(df) self.assertEqual( fc_out, self.data.build(), ) def test_add_df_with_df_contains_na(self) -> None: df = pd.read_csv( self.asset_dir / "df_na.csv", dtype={"PopularityAsDimension": str} ) self.data.add_df(df) self.assertEqual( { "data": { "series": [ { "name": "Popularity", "type": "measure", "values": [100.0, 0.0], }, { "name": "PopularityAsDimension", "type": "dimension", "values": ["", "100"], }, ] } }, self.data.build(), ) def test_add_df_with_series(self) -> None: data = Data() data.add_df(pd.Series([1, 2], name="series1")) data.add_df( pd.Series({"x": 3, "y": 4, "z": 5}, index=["x", "y"], name="series2") ) self.assertEqual( { "data": { "series": [ {"name": "series1", "type": "measure", "values": [1.0, 2.0]}, {"name": "series2", "type": "measure", "values": [3.0, 4.0]}, ] } }, data.build(), ) def test_add_df_with_df_and_with_include_index(self) -> None: data = Data() df = pd.DataFrame({"series": [1, 2, 3]}, index=["x", "y", "z"]) data.add_df(df, include_index="Index") self.assertEqual( { "data": { "series": [ { "name": "Index", "type": "dimension", "values": ["x", "y", "z"], }, { "name": "series", "type": "measure", "values": [1.0, 2.0, 3.0], }, ] } }, data.build(), ) def test_add_df_with_series_and_with_include_index(self) -> None: data = Data() df = pd.Series({"x": 1, "y": 2, "z": 3}, index=["x", "y"], name="series") data.add_df(df, include_index="Index") self.assertEqual( { "data": { "series": [ {"name": "Index", "type": "dimension", "values": ["x", "y"]}, {"name": "series", "type": "measure", "values": [1.0, 
2.0]}, ] } }, data.build(), ) def test_add_df_index(self) -> None: data = Data() df = pd.Series({"x": 1, "y": 2, "z": 3}, index=["x", "y"], name="series") data.add_df_index(df, column_name="Index") data.add_df(df) self.assertEqual( { "data": { "series": [ {"name": "Index", "type": "dimension", "values": ["x", "y"]}, {"name": "series", "type": "measure", "values": [1.0, 2.0]}, ] } }, data.build(), ) def test_add_df_index_with_none(self) -> None: data = Data() df = pd.DataFrame() data.add_df_index(df, column_name="Index") data.add_df(df) self.assertEqual( {"data": {}}, data.build(), ) def test_add_df_if_pandas_not_installed(self) -> None: with RaiseImportError.module_name("pandas"): data = Data() with self.assertRaises(ImportError): data.add_df(pd.DataFrame()) class TestDataAddDataframe(unittest.TestCase): asset_dir: pathlib.Path @classmethod def setUpClass(cls) -> None: cls.asset_dir = pathlib.Path(__file__).parent / "assets" def setUp(self) -> None: self.data = Data() def test_add_data_frame_with_none(self) -> None: data = Data() data.add_data_frame(None) self.assertEqual( {"data": {}}, data.build(), ) def test_add_data_frame_with_df(self) -> None: with open(self.asset_dir / "df_in.json", encoding="utf8") as fh_in: fc_in = json.load(fh_in) with open(self.asset_dir / "df_out.json", encoding="utf8") as fh_out: fc_out = json.load(fh_out) df = pd.DataFrame(fc_in) df = df.astype({"PopularityAsDimension": str}) self.data.add_data_frame(df) self.assertEqual( fc_out, self.data.build(), ) def test_add_data_frame_with_df_contains_na(self) -> None: df = pd.read_csv( self.asset_dir / "df_na.csv", dtype={"PopularityAsDimension": str} ) self.data.add_data_frame(df) self.assertEqual( { "data": { "series": [ { "name": "Popularity", "type": "measure", "values": [100.0, 0.0], }, { "name": "PopularityAsDimension", "type": "dimension", "values": ["", "100"], }, ] } }, self.data.build(), ) def test_add_data_frame_with_series(self) -> None: data = Data() data.add_data_frame(pd.Series([1, 2], name="series1")) data.add_data_frame( pd.Series({"x": 3, "y": 4, "z": 5}, index=["x", "y"], name="series2") ) self.assertEqual( { "data": { "series": [ {"name": "series1", "type": "measure", "values": [1.0, 2.0]}, {"name": "series2", "type": "measure", "values": [3.0, 4.0]}, ] } }, data.build(), ) def test_add_data_frame_index(self) -> None: data = Data() df = pd.Series({"x": 1, "y": 2, "z": 3}, index=["x", "y"], name="series") data.add_data_frame_index(df, name="Index") data.add_data_frame(df) self.assertEqual( { "data": { "series": [ {"name": "Index", "type": "dimension", "values": ["x", "y"]}, {"name": "series", "type": "measure", "values": [1.0, 2.0]}, ] } }, data.build(), ) class TestDataAddNpArray(unittest.TestCase): def test_add_np_array_none(self) -> None: data = Data() data.add_np_array(None) self.assertEqual( {"data": {}}, data.build(), ) def test_add_np_array_empty(self) -> None: np_array = np.empty([]) data = Data() data.add_np_array(np_array) self.assertEqual( {"data": {}}, data.build(), ) def test_add_np_array1dim(self) -> None: np_array = np.array([127, 128, 129]) data = Data() data.add_np_array(np_array) self.assertEqual( { "data": { "series": [ {"name": "0", "type": "measure", "values": [127, 128, 129]}, ] } }, data.build(), ) def test_add_np_array1dim_with_str_value(self) -> None: np_array = np.array([127, "128", 129]) data = Data() data.add_np_array(np_array) self.assertEqual( { "data": { "series": [ { "name": "0", "type": "dimension", "values": ["127", "128", "129"], }, ] } }, data.build(), ) def 
test_add_np_array1dim_with_str_and_na_value_and_column_name_and_dtype( self, ) -> None: np_array = np.array([127, "128", np.nan]) data = Data() data.add_np_array(np_array, column_name="First", column_dtype=int) self.assertEqual( { "data": { "series": [ { "name": "First", "type": "measure", "values": [127, 128, 0], }, ] } }, data.build(), ) def test_add_np_array2dim(self) -> None: np_array = np.array([[127, 128, 129], [255, 256, 257], [511, 512, 513]]) data = Data() data.add_np_array(np_array) self.assertEqual( { "data": { "series": [ {"name": "0", "type": "measure", "values": [127, 255, 511]}, {"name": "1", "type": "measure", "values": [128, 256, 512]}, {"name": "2", "type": "measure", "values": [129, 257, 513]}, ] } }, data.build(), ) def test_add_np_array2dim_with_str_and_na_value_and_column_name_and_dtype( self, ) -> None: np_array = np.array([[127, "128", 129], [255, np.nan, 257], [511, 512, 513]]) data = Data() data.add_np_array(np_array, column_name={0: "First"}, column_dtype={2: int}) self.assertEqual( { "data": { "series": [ { "name": "First", "type": "dimension", "values": ["127", "255", "511"], }, { "name": "1", "type": "dimension", "values": ["128", "", "512"], }, { "name": "2", "type": "measure", "values": [129, 257, 513], }, ] } }, data.build(), ) def test_add_np_array2dim_with_non_dict_column_name(self) -> None: np_array = np.zeros((2, 2)) data = Data() with self.assertRaises(ValueError): data.add_np_array(np_array, column_name="First") def test_add_np_array2dim_with_non_dict_column_dtype(self) -> None: np_array = np.zeros((2, 2)) data = Data() with self.assertRaises(ValueError): data.add_np_array(np_array, column_dtype=str) def test_add_np_array3dim(self) -> None: np_array = np.zeros((3, 3, 3)) data = Data() with self.assertRaises(ValueError): data.add_np_array(np_array) def test_add_df_if_numpy_not_installed(self) -> None: with RaiseImportError.module_name("numpy"): data = Data() with self.assertRaises(ImportError): data.add_np_array(np.empty(())) class TestConfig(unittest.TestCase): def test_config(self) -> None: animation = Config({"color": {"set": ["Genres"]}}) self.assertEqual({"config": {"color": {"set": ["Genres"]}}}, animation.build()) def test_config_preset(self) -> None: animation = Config.column({"x": "foo", "y": "bar"}) # instead of build() test with dump() because contains raw js self.assertEqual( "{\"config\": lib.presets.column({'x': 'foo', 'y': 'bar'})}", animation.dump(), ) class TestStyle(unittest.TestCase): def test_style(self) -> None: animation = Style({"title": {"backgroundColor": "#A0A0A0"}}) self.assertEqual( {"style": {"title": {"backgroundColor": "#A0A0A0"}}}, animation.build() ) def test_style_can_be_none(self) -> None: animation = Style(None) self.assertEqual({"style": None}, animation.build()) class TestKeyframe(unittest.TestCase): def test_animation_has_to_be_passed_even_if_options_passed( self, ) -> None: with self.assertRaises(ValueError): Keyframe(duration="500ms") def test_keyframe_cannot_be_passed( self, ) -> None: with self.assertRaises(ValueError): Keyframe(Keyframe(Style(None))) def test_animation_and_snapshot_cannot_be_passed( self, ) -> None: with self.assertRaises(ValueError): Keyframe(Keyframe(Style(None), Snapshot("abc123"))) def test_animation_and_stored_animation_cannot_be_passed( self, ) -> None: with self.assertRaises(ValueError): Keyframe(Keyframe(Style(None), Animation("abc123"))) def test_keyframe(self) -> None: animation = Keyframe( Data.filter(None), Style(None), Config({"title": "Keyframe"}) ) self.assertEqual( 
animation.build(), { "target": { "config": {"title": "Keyframe"}, "data": {"filter": None}, "style": None, } }, ) def test_keyframe_with_snapshot(self) -> None: animation = Keyframe(Snapshot("abc123")) self.assertEqual( animation.build(), { "target": "abc123", }, ) def test_keyframe_with_stored_animation(self) -> None: animation = Keyframe(Animation("abc123")) self.assertEqual( animation.build(), { "target": "abc123", }, ) def test_keyframe_with_options(self) -> None: animation = Keyframe( Data.filter(None), Style(None), Config({"title": "Keyframe"}), duration=1 ) self.assertEqual( animation.build(), { "target": { "config": {"title": "Keyframe"}, "data": {"filter": None}, "style": None, }, "options": {"duration": 1}, }, ) class TestSnapshot(unittest.TestCase): def test_snapshot(self) -> None: animation = Snapshot("abc1234") self.assertEqual("abc1234", animation.build()) def test_snapshot_dump(self) -> None: animation = Snapshot("abc1234") self.assertEqual('"abc1234"', animation.dump()) class TestAnimation(unittest.TestCase): def test_animation(self) -> None: animation = Animation("abc1234") self.assertEqual("abc1234", animation.build()) def test_animation_dump(self) -> None: animation = Animation("abc1234") self.assertEqual('"abc1234"', animation.dump()) class TestMerger(unittest.TestCase): def setUp(self) -> None: self.merger = AnimationMerger() self.data = Data() self.data.add_record(["Rock", "Hard", 96]) self.config = Config({"channels": {"label": {"attach": ["Popularity"]}}}) def test_merge(self) -> None: self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) self.assertEqual( json.dumps( { "data": {"records": [["Rock", "Hard", 96]]}, "config": {"channels": {"label": {"attach": ["Popularity"]}}}, "style": {"title": {"backgroundColor": "#A0A0A0"}}, } ), self.merger.dump(), ) def test_merge_none(self) -> None: self.merger.merge(self.config) self.merger.merge(Style(None)) self.assertEqual( '{"config": {"channels": {"label": {"attach": ["Popularity"]}}}, "style": null}', self.merger.dump(), ) def test_snapshot_can_not_be_merged(self) -> None: self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) self.assertRaises(ValueError, self.merger.merge, Snapshot("abc1234")) def test_stored_animation_can_not_be_merged(self) -> None: self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) self.assertRaises(ValueError, self.merger.merge, Animation("abc1234")) def test_only_different_animations_can_be_merged(self) -> None: self.merger.merge(self.data) self.merger.merge(self.config) self.merger.merge(Style({"title": {"backgroundColor": "#A0A0A0"}})) data = Data() data.add_record(["Pop", "Hard", 114]) self.assertRaises(ValueError, self.merger.merge, data) self.assertRaises(ValueError, self.merger.merge, Config({"title": "Test"})) self.assertRaises(ValueError, self.merger.merge, Style(None)) def test_merge_keyframes(self) -> None: self.merger.merge(Keyframe(Style(None))) self.merger.merge(Keyframe(Style(None), duration=0)) self.merger.merge(Keyframe(Style(None))) self.assertEqual( self.merger.dump(), json.dumps( [ {"target": {"style": None}}, {"target": {"style": None}, "options": {"duration": 0}}, {"target": {"style": None}}, ] ), ) def test_keyframe_and_animation_can_not_be_merged(self) -> None: self.merger.merge(Keyframe(Style(None))) self.assertRaises(ValueError, self.merger.merge, 
self.data)

    def test_animation_and_keyframe_can_not_be_merged(self) -> None:
        self.merger.merge(self.data)
        self.assertRaises(ValueError, self.merger.merge, Keyframe(Style(None)))

    def test_merge_animations_keyframe(self) -> None:
        animations = tuple([Keyframe(Style(None))])
        merger = AnimationMerger.merge_animations(animations)
        self.assertEqual(
            merger.dump(),
            json.dumps(
                [
                    {"target": {"style": None}},
                ]
            ),
        )

tests/__init__.py METASEP
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring

tests/test_data/test_pandas.py METASEP
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring

import pandas as pd

from tests.test_data import DataWithAssets
from tests.utils.import_error import RaiseImportError


class TestDf(DataWithAssets):
    def test_add_df_if_pandas_not_installed(self) -> None:
        with RaiseImportError.module_name("pandas"):
            with self.assertRaises(ImportError):
                self.data.add_df(pd.DataFrame())

    def test_add_df_with_none(self) -> None:
        self.data.add_df(None)
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_df_with_empty_df(self) -> None:
        self.data.add_df(pd.DataFrame())
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_df_with_df(self) -> None:
        df = self.in_pd_df_by_series_with_duplicated_popularity
        self.data.add_df(df)
        self.assertEqual(
            self.ref_pd_df_by_series_with_duplicated_popularity,
            self.data.build(),
        )

    def test_add_df_with_df_contains_na(self) -> None:
        df = self.in_pd_df_by_series_with_nan
        self.data.add_df(df)
        self.assertEqual(
            self.ref_pd_df_by_series_with_nan,
            self.data.build(),
        )

    def test_add_df_with_df_and_with_include_index(self) -> None:
        df = self.in_pd_df_by_series_with_index
        self.data.add_df(df, include_index="Index")
        self.assertEqual(
            self.ref_pd_df_by_series_with_index,
            self.data.build(),
        )

    def test_add_df_with_df_and_max_rows(self) -> None:
        df = self.in_pd_df_by_series
        self.data.add_df(df, max_rows=2)
        self.assertEqual(
            self.ref_pd_df_by_series_max_rows,
            self.data.build(),
        )


class TestDataFrame(DataWithAssets):
    def test_add_data_frame_with_none(self) -> None:
        self.data.add_data_frame(None)
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_data_frame_with_empty_df(self) -> None:
        self.data.add_data_frame(pd.DataFrame())
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_data_frame_with_df(self) -> None:
        df = self.in_pd_df_by_series_with_duplicated_popularity
        self.data.add_data_frame(df)
        self.assertEqual(
            self.ref_pd_df_by_series_with_duplicated_popularity,
            self.data.build(),
        )

    def test_add_data_frame_with_df_contains_na(self) -> None:
        df = self.in_pd_df_by_series_with_nan
        self.data.add_data_frame(df)
        self.assertEqual(
            self.ref_pd_df_by_series_with_nan,
            self.data.build(),
        )


class TestDfWithSeries(DataWithAssets):
    def test_add_df_with_empty_series(self) -> None:
        self.data.add_df(pd.Series())
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_df_with_series(self) -> None:
        self.data.add_df(self.in_pd_series_dimension)
        self.data.add_df(self.in_pd_series_measure)
        self.assertEqual(
            self.ref_pd_series,
            self.data.build(),
        )

    def test_add_df_with_series_contains_na(self) -> None:
        self.data.add_df(self.in_pd_series_dimension_with_nan)
        self.data.add_df(self.in_pd_series_measure_with_nan)
        self.assertEqual(
            self.ref_pd_series_with_nan,
            self.data.build(),
        )

    def test_add_df_with_series_and_with_include_index(self) -> None:
        self.data.add_df(
            self.in_pd_series_dimension_with_index,
            include_index="DimensionIndex",
        )
        self.data.add_df(
            self.in_pd_series_measure_with_index,
            include_index="MeasureIndex",
        )
        self.assertEqual(
            self.ref_pd_series_with_index,
            self.data.build(),
        )


class TestDataFrameWithSeries(DataWithAssets):
    def test_add_data_frame_with_empty_series(self) -> None:
        self.data.add_data_frame(pd.Series())
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_data_frame_with_series(self) -> None:
        self.data.add_data_frame(self.in_pd_series_dimension)
        self.data.add_data_frame(self.in_pd_series_measure)
        self.assertEqual(
            self.ref_pd_series,
            self.data.build(),
        )

    def test_add_data_frame_with_series_contains_na(self) -> None:
        self.data.add_data_frame(self.in_pd_series_dimension_with_nan)
        self.data.add_data_frame(self.in_pd_series_measure_with_nan)
        self.assertEqual(
            self.ref_pd_series_with_nan,
            self.data.build(),
        )


class TestDfIndex(DataWithAssets):
    def test_add_df_index_with_none(self) -> None:
        self.data.add_df_index(None, column_name="Index")
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_df_index_with_df(self) -> None:
        df = self.in_pd_df_by_series_with_index
        self.data.add_df_index(df, column_name="Index")
        self.assertEqual(
            self.ref_pd_df_by_series_only_index,
            self.data.build(),
        )


class TestDataFrameIndex(DataWithAssets):
    def test_add_data_frame_index_with_none(self) -> None:
        self.data.add_data_frame_index(None, name="Index")
        self.assertEqual(
            {"data": {}},
            self.data.build(),
        )

    def test_add_data_frame_index_with_df(self) -> None:
        df = self.in_pd_df_by_series_with_index
        self.data.add_data_frame_index(df, name="Index")
        self.assertEqual(
            self.ref_pd_df_by_series_only_index,
            self.data.build(),
        )


class TestDfIndexWithSeries(DataWithAssets):
    def test_add_df_index_with_series(self) -> None:
        self.data.add_df_index(
            self.in_pd_series_dimension_with_index,
            column_name="DimensionIndex",
        )
        self.data.add_df_index(
            self.in_pd_series_measure_with_index,
            column_name="MeasureIndex",
        )
        self.assertEqual(
            self.ref_pd_series_only_index,
            self.data.build(),
        )
disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n 
self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def 
test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n 
include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as 
pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n 
self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_data_frame_index(df, name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDfIndexWithSeries(DataWithAssets):\n def test_add_df_index_with_series(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import 
RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = 
self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_data_frame_index(df, name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n 
)\n\n\nclass TestDfIndexWithSeries(DataWithAssets):\n def test_add_df_index_with_series(self) -> None:\n self.data.add_df_index(\n self.in_pd_series_dimension_with_index,\n column_name=\"DimensionIndex\",\n )\n self.data.add_df_index(\n self.in_pd_series_measure_with_index,\n column_name=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndexWithSeries(DataWithAssets):\n def test_add_data_frame_index_with_series(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:", "type": "inproject" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n 
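# The TestCase classes above all exercise one pattern: feed a pandas object
# into the data builder, then compare the serialized output against a
# pre-built reference dict from the shared DataWithAssets fixtures.
# Minimal usage sketch (assumption, not part of the original module): running
# the suite with the standard-library unittest runner; the "tests" discovery
# path below is hypothetical.
import unittest

if __name__ == "__main__":
    # Discover and run every test_*.py module under the assumed tests/ directory.
    suite = unittest.defaultTestLoader.discover("tests")
    unittest.TextTestRunner(verbosity=2).run(suite)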
self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_data_frame_index(df, name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDfIndexWithSeries(DataWithAssets):\n def test_add_df_index_with_series(self) -> None:\n self.data.add_df_index(\n self.in_pd_series_dimension_with_index,\n column_name=\"DimensionIndex\",\n )\n self.data.add_df_index(\n self.in_pd_series_measure_with_index,\n column_name=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndexWithSeries(DataWithAssets):", "type": "non_informative" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets", "type": "non_informative" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def 
test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n 
self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_data_frame_index(df, name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDfIndexWithSeries(DataWithAssets):\n def test_add_df_index_with_series(self) -> None:\n self.data.add_df_index(", "type": "random" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n 
self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_data_frame_index(df, name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDfIndexWithSeries(DataWithAssets):\n def test_add_df_index_with_series(self) -> None:\n self.data.add_df_index(\n self.in_pd_series_dimension_with_index,\n column_name=\"DimensionIndex\",\n )\n self.data.add_df_index(\n self.in_pd_series_measure_with_index,", "type": "random" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n 
self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(", "type": "random" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom 
tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)", "type": "random" }, { "content": "# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring\n\nimport pandas as pd\n\nfrom tests.test_data import DataWithAssets\nfrom tests.utils.import_error import RaiseImportError\n\n\nclass TestDf(DataWithAssets):\n def test_add_df_if_pandas_not_installed(self) -> None:\n with RaiseImportError.module_name(\"pandas\"):\n with self.assertRaises(ImportError):\n self.data.add_df(pd.DataFrame())\n\n def test_add_df_with_none(self) -> None:\n self.data.add_df(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_empty_df(self) -> None:\n self.data.add_df(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_df_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_df(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_with_include_index(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df(df, include_index=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_with_index,\n self.data.build(),\n )\n\n def test_add_df_with_df_and_max_rows(self) -> None:\n df = self.in_pd_df_by_series\n self.data.add_df(df, max_rows=2)\n self.assertEqual(\n self.ref_pd_df_by_series_max_rows,\n self.data.build(),\n )\n\n\nclass TestDataFrame(DataWithAssets):\n def test_add_data_frame_with_none(self) -> None:\n self.data.add_data_frame(None)\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_empty_df(self) -> None:\n self.data.add_data_frame(pd.DataFrame())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_duplicated_popularity\n self.data.add_data_frame(df)\n self.assertEqual(\n 
self.ref_pd_df_by_series_with_duplicated_popularity,\n self.data.build(),\n )\n\n def test_add_data_frame_with_df_contains_na(self) -> None:\n df = self.in_pd_df_by_series_with_nan\n self.data.add_data_frame(df)\n self.assertEqual(\n self.ref_pd_df_by_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfWithSeries(DataWithAssets):\n def test_add_df_with_empty_series(self) -> None:\n self.data.add_df(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_with_series(self) -> None:\n self.data.add_df(self.in_pd_series_dimension)\n self.data.add_df(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_df_with_series_contains_na(self) -> None:\n self.data.add_df(self.in_pd_series_dimension_with_nan)\n self.data.add_df(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n def test_add_df_with_series_and_with_include_index(self) -> None:\n self.data.add_df(\n self.in_pd_series_dimension_with_index,\n include_index=\"DimensionIndex\",\n )\n self.data.add_df(\n self.in_pd_series_measure_with_index,\n include_index=\"MeasureIndex\",\n )\n self.assertEqual(\n self.ref_pd_series_with_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameWithSeries(DataWithAssets):\n def test_add_data_frame_with_empty_series(self) -> None:\n self.data.add_data_frame(pd.Series())\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_data_frame_with_series(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension)\n self.data.add_data_frame(self.in_pd_series_measure)\n self.assertEqual(\n self.ref_pd_series,\n self.data.build(),\n )\n\n def test_add_data_frame_with_series_contains_na(self) -> None:\n self.data.add_data_frame(self.in_pd_series_dimension_with_nan)\n self.data.add_data_frame(self.in_pd_series_measure_with_nan)\n self.assertEqual(\n self.ref_pd_series_with_nan,\n self.data.build(),\n )\n\n\nclass TestDfIndex(DataWithAssets):\n def test_add_df_index_with_none(self) -> None:\n self.data.add_df_index(None, column_name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),\n )\n\n def test_add_df_index_with_df(self) -> None:\n df = self.in_pd_df_by_series_with_index\n self.data.add_df_index(df, column_name=\"Index\")\n self.assertEqual(\n self.ref_pd_df_by_series_only_index,\n self.data.build(),\n )\n\n\nclass TestDataFrameIndex(DataWithAssets):\n def test_add_data_frame_index_with_none(self) -> None:\n self.data.add_data_frame_index(None, name=\"Index\")\n self.assertEqual(\n {\"data\": {}},\n self.data.build(),", "type": "random" } ]
[ " self.data.build(),", " self.data.add_data_frame(pd.Series())", " with RaiseImportError.module_name(\"pandas\"):", " self.data.add_df(pd.DataFrame())", " self.data.add_data_frame(self.in_pd_series_dimension)", " self.data.add_data_frame(self.in_pd_series_measure)", " self.data.add_df(None)", " self.data.add_data_frame(self.in_pd_series_dimension_with_nan)", " self.data.add_data_frame(self.in_pd_series_measure_with_nan)", " self.data.add_df(df)", " self.data.add_df_index(None, column_name=\"Index\")", " self.data.add_df_index(df, column_name=\"Index\")", " self.data.add_df(df, include_index=\"Index\")", " self.data.add_data_frame_index(None, name=\"Index\")", " self.data.add_df(df, max_rows=2)", " self.data.add_data_frame_index(df, name=\"Index\")", " self.data.add_data_frame(None)", " self.data.add_df_index(", " self.data.add_data_frame(pd.DataFrame())", " self.data.add_data_frame(df)", " self.data.add_data_frame_index(", " self.data.add_df(pd.Series())", " self.data.add_df(self.in_pd_series_dimension)", " self.data.add_df(self.in_pd_series_measure)", " self.data.add_df(self.in_pd_series_dimension_with_nan)", " self.data.add_df(self.in_pd_series_measure_with_nan)", " self.data.add_df(", " def test_add_data_frame_index_with_df(self) -> None:", "class TestDfIndex(DataWithAssets):", "", " def test_add_data_frame_index_with_series(self) -> None:", "from tests.utils.import_error import RaiseImportError", " self.in_pd_series_dimension_with_index,", " column_name=\"MeasureIndex\",", " self.ref_pd_df_by_series_only_index,", " self.assertEqual(", " )" ]
METASEP
20
mage-ai__mage-ai
mage-ai__mage-ai METASEP cleaning/__init__.py METASEP src/data_cleaner/transformer_actions/udf/substring.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Substring(BaseUDF): def execute(self): start = self.options.get('start') stop = self.options.get('stop') if start is None and stop is None: raise Exception('Require at least one of `start` and `stop` parameters.') return self.df[self.arguments[0]].str.slice(start=start, stop=stop) src/data_cleaner/transformer_actions/udf/string_split.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringSplit(BaseUDF): def execute(self): separator = self.options.get('separator') part_index = self.options.get('part_index') if separator is None or part_index is None: raise Exception('Require both `separator` and `part_index` parameters.') return self.df[self.arguments[0]].str.split(separator).str[part_index].str.strip() src/data_cleaner/transformer_actions/udf/string_replace.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringReplace(BaseUDF): def execute(self): pattern = self.options.get('pattern') replacement = self.options.get('replacement') if not pattern and not replacement: raise Exception(f'Require both `pattern` and `replacement` parameters.') return self.df[self.arguments[0]].str.replace(pattern, replacement) src/data_cleaner/transformer_actions/udf/multiply.py METASEP from transformer_actions.udf.base import BaseUDF class Multiply(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) * self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] * float(self.options['value']) raise Exception('Require second column or a value to multiply.') src/data_cleaner/transformer_actions/udf/if_else.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.udf.base import BaseUDF class IfElse(BaseUDF): def execute(self): df_copy = self.df.copy() true_index = query_with_action_code(df_copy, self.code, self.kwargs).index arg1_type = self.options.get('arg1_type', 'value') arg2_type = self.options.get('arg2_type', 'value') arg1 = self.arguments[0] if arg1_type == 'column': arg1 = df_copy[arg1] arg2 = self.arguments[1] if arg2_type == 'column': arg2 = df_copy[arg2] df_copy.loc[true_index, 'result'] = arg1 df_copy['result'] = df_copy['result'].fillna(arg2) return df_copy['result'] src/data_cleaner/transformer_actions/udf/formatted_date.py METASEP from transformer_actions.udf.base import BaseUDF import pandas as pd class FormattedDate(BaseUDF): def execute(self): return pd.to_datetime( self.df[self.arguments[0]], ).dt.strftime(self.options['format']) src/data_cleaner/transformer_actions/udf/divide.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Divide(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) / self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] / float(self.options['value']) raise Exception('Require second column or a value to divide.') src/data_cleaner/transformer_actions/udf/distance_between.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np EARTH_RADIUS = 6371 class DistanceBetween(BaseUDF): def execute(self): def __haversine(lat1, lng1, lat2, lng2): lat1, lng1, lat2, lng2 
= np.radians([lat1, lng1, lat2, lng2]) a = np.sin((lat2-lat1)/2.0)**2 + \ np.cos(lat1) * np.cos(lat2) * np.sin((lng2-lng1)/2.0)**2 return EARTH_RADIUS * 2 * np.arcsin(np.sqrt(a)) return __haversine( self.df[self.arguments[0]], self.df[self.arguments[1]], self.df[self.arguments[2]], self.df[self.arguments[3]], ) src/data_cleaner/transformer_actions/udf/difference.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class Difference(BaseUDF): def execute(self): col1 = self.arguments[0] column_type = self.options.get('column_type', self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) > 1: col2 = self.arguments[1] return self.__difference_between_columns( self.df[col1], self.df[col2], column_type=column_type, options=self.options, ) elif self.options.get('value') is not None: return self.__subtract_value( self.df[col1], self.options['value'], column_type=column_type, options=self.options, ) raise Exception('Require second column or a value to minus.') def __difference_between_columns(self, column1, column2, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return (pd.to_datetime(column1, utc=True) - pd.to_datetime(column2, utc=True)).dt.days return column1 - column2 def __subtract_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) - pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column - value src/data_cleaner/transformer_actions/udf/date_trunc.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class DateTrunc(BaseUDF): def execute(self): date_part = self.options['date_part'] date_column = self.arguments[0] df_copy = self.df.copy() df_copy[date_column] = pd.to_datetime(df_copy[date_column]) if date_part == 'week': return (df_copy[date_column] - df_copy[date_column].dt.weekday * np.timedelta64(1, 'D')).\ dt.strftime('%Y-%m-%d') raise Exception(f'Date part {date_part} is not supported.') src/data_cleaner/transformer_actions/udf/constant.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Constant(BaseUDF): def execute(self): return self.arguments[0] src/data_cleaner/transformer_actions/udf/base.py METASEP import importlib class BaseUDF(): def __init__(self, df, arguments=[], code=None, options={}, kwargs={}): self.df = df self.arguments = arguments self.code = code self.options = options self.kwargs = kwargs def execute(self): pass def execute_udf(udf_name, df, arguments, code, options, kwargs): udf_class = getattr( importlib.import_module(f'transformer_actions.udf.{udf_name}'), udf_name.title().replace('_', ''), ) return udf_class(df, arguments, code, options, kwargs).execute() src/data_cleaner/transformer_actions/udf/addition.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class Addition(BaseUDF): def execute(self): col1 = self.arguments[0] df_result = self.df[col1] column_type = self.options.get("column_type", self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) == 1 and 'value' not in self.options: raise Exception('Require second column or a value to add.') if len(self.arguments) > 1: for col in self.arguments[1:]: df_result = 
df_result + self.df[col] if self.options.get('value') is not None: df_result = self.__add_value( df_result, self.options['value'], column_type=column_type, options=self.options, ) return df_result def __add_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) + pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column + value src/data_cleaner/transformer_actions/udf/__init__.py METASEP src/server/hello.py METASEP from flask import Flask app = Flask(__name__) @app.route("/") def hello_world(): return "<p>Hello, World!</p>" src/data_cleaner/transformer_actions/variable_replacer.py METASEP from data_cleaner.transformer_actions.constants import VariableType import re def interpolate(text, key, variable_data): """ text: string to operate on key: key to search within text variable_data: dictionary containing data used to interpolate """ regex_replacement = key if variable_data['type'] == VariableType.FEATURE: regex_replacement = variable_data[VariableType.FEATURE]['uuid'] elif variable_data['type'] == VariableType.FEATURE_SET_VERSION: regex_replacement = \ variable_data[VariableType.FEATURE_SET_VERSION][VariableType.FEATURE_SET]['uuid'] regex_pattern = re.compile( '\%__BRACKETS_START__{}__BRACKETS_END__' .format(key) .replace('__BRACKETS_START__', '\{') .replace('__BRACKETS_END__', '\}') ) return re.sub(regex_pattern, regex_replacement, str(text)) def replace_true_false(action_code): regex_pattern_true = re.compile(' true') regex_pattern_false = re.compile(' false') return re.sub( regex_pattern_true, ' True', re.sub(regex_pattern_false, ' False', action_code), ) src/data_cleaner/transformer_actions/utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis def columns_to_remove(transformer_actions): arr = filter( lambda x: x['action_type'] == ActionType.REMOVE and x['axis'] == Axis.COLUMN, transformer_actions, ) columns = [] for transformer_action in arr: columns += transformer_action['action_arguments'] return columns src/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) src/data_cleaner/transformer_actions/row.py METASEP from 
data_cleaner.column_type_detector import NUMBER_TYPES from data_cleaner.transformer_actions.constants import VariableType from data_cleaner.transformer_actions.action_code import query_with_action_code import pandas as pd def drop_duplicates(df, action, **kwargs): keep = action.get('action_options', {}).get('keep', 'last') return df.drop_duplicates(subset=action['action_arguments'], keep=keep) def filter_rows(df, action, **kwargs): """ df: Pandas DataFrame action: TransformerAction serialized into a dictionary """ action_code = action['action_code'] return query_with_action_code(df, action_code, kwargs) def sort_rows(df, action, **kwargs): ascending = action.get('action_options', {}).get('ascending', True) ascendings = action.get('action_options', {}).get('ascendings', []) if len(ascendings) > 0: ascending = ascendings[0] feature_by_uuid = {} if action.get('action_variables'): for _, val in action['action_variables'].items(): feature = val.get('feature') if feature: feature_by_uuid[feature['uuid']] = feature na_indexes = None as_types = {} for idx, uuid in enumerate(action['action_arguments']): feature = feature_by_uuid.get(uuid) if feature and feature['column_type'] in NUMBER_TYPES: as_types[uuid] = float if idx == 0: na_indexes = df[(df[uuid].isnull()) | (df[uuid].astype(str).str.len() == 0)].index bad_df = None if na_indexes is not None: bad_df = df.index.isin(na_indexes) index = (df[~bad_df] if bad_df is not None else df).astype(as_types).sort_values( by=action['action_arguments'], ascending=ascendings if len(ascendings) > 0 else ascending, ).index df_final = df.loc[index] if bad_df is not None: if ascending: return pd.concat([ df.iloc[bad_df], df_final, ]) return pd.concat([ df_final, df.iloc[bad_df], ]) return df_final src/data_cleaner/transformer_actions/helpers.py METASEP from data_cleaner.column_type_detector import NUMBER, NUMBER_WITH_DECIMALS, TEXT from data_cleaner.transformer_actions.constants import ActionType, Operator, VariableType import numpy as np import re DAY_SECONDS = 86400 HOUR_SECONDS = 3600 def convert_col_type(df_col, col_type): if col_type == NUMBER: return df_col.replace(r'^\s*$', 0, regex=True).fillna(0).astype(np.int64) elif col_type == NUMBER_WITH_DECIMALS: return df_col.dropna().astype(float) elif col_type == TEXT: return df_col.dropna().astype(str) return df_col def convert_value_type(feature_uuid, action, value): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break if column_type == NUMBER: value = int(value) elif column_type == NUMBER_WITH_DECIMALS: value = float(value) return value def drop_na(df): return df.replace(r'^\s*$', np.nan, regex=True).dropna() def extract_join_feature_set_version_id(payload): if payload['action_type'] != ActionType.JOIN: return None join_feature_set_version_id = payload['action_arguments'][0] if type(join_feature_set_version_id) == str and \ join_feature_set_version_id.startswith('%{'): join_feature_set_version_id = next( v['id'] for v in payload['action_variables'].values() if v['type'] == VariableType.FEATURE_SET_VERSION ) return join_feature_set_version_id def get_column_type(feature_uuid, action): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break return column_type def 
get_time_window_str(window_in_seconds): if window_in_seconds is None: return None if window_in_seconds >= DAY_SECONDS: time_window = f'{int(window_in_seconds / DAY_SECONDS)}d' elif window_in_seconds >= HOUR_SECONDS: time_window = f'{int(window_in_seconds / HOUR_SECONDS)}h' else: time_window = f'{window_in_seconds}s' return time_window src/data_cleaner/transformer_actions/constants.py METASEP class ActionType(): ADD = 'add' AVERAGE = 'average' COUNT = 'count' COUNT_DISTINCT = 'count_distinct' DIFF = 'diff' DROP_DUPLICATE = 'drop_duplicate' EXPAND_COLUMN = 'expand_column' EXPLODE = 'explode' FILTER = 'filter' FIRST = 'first' GROUP = 'group' IMPUTE = 'impute' JOIN = 'join' LAST = 'last' LIMIT = 'limit' MAX = 'max' MEDIAN = 'median' MIN = 'min' MODE = 'mode' REMOVE = 'remove' SCALE = 'scale' SELECT = 'select' SHIFT_DOWN = 'shift_down' SHIFT_UP = 'shift_up' SORT = 'sort' SUM = 'sum' UNION = 'union' UPDATE_TYPE = 'update_type' UPDATE_VALUE = 'update_value' class Axis(): COLUMN = 'column' ROW = 'row' class VariableType(): FEATURE = 'feature' FEATURE_SET = 'feature_set' FEATURE_SET_VERSION = 'feature_set_version' class Operator(): CONTAINS = 'contains' NOT_CONTAINS = 'not contains' EQUALS = '==' NOT_EQUALS = '!=' GREATER_THAN = '>' GREATER_THAN_OR_EQUALS = '>=' LESS_THAN = '<' LESS_THAN_OR_EQUALS = '<=' src/data_cleaner/transformer_actions/column.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.helpers import ( convert_col_type, get_column_type, get_time_window_str, ) from data_cleaner.transformer_actions.udf.base import execute_udf import pandas as pd import numpy as np def add_column(df, action, **kwargs): col = action['outputs'][0]['uuid'] col_type = action['outputs'][0]['column_type'] udf = action['action_options'].get('udf') if udf is None: return df df_copy = df.copy() df_copy[col] = execute_udf( udf, df, action.get('action_arguments'), action.get('action_code'), action.get('action_options'), kwargs, ) df_copy[col] = convert_col_type(df_copy[col], col_type) return df_copy def average(df, action, **kwargs): return __agg(df, action, 'mean') def count(df, action, **kwargs): return __groupby_agg(df, action, 'count') def count_distinct(df, action, **kwargs): return __groupby_agg(df, action, 'nunique') def diff(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].diff() return df def first(df, action, **kwargs): return __agg(df, action, 'first') def impute(df, action, **kwargs): columns = action['action_arguments'] action_options = action['action_options'] strategy = action_options.get('strategy') value = action_options.get('value') empty_string_pattern = r'^\s*$' df[columns] = df[columns].replace(empty_string_pattern, np.nan, regex=True) if strategy == 'average': df[columns] = df[columns].fillna(df[columns].astype(float).mean(axis=0)) elif strategy == 'median': df[columns] = df[columns].fillna(df[columns].astype(float).median(axis=0)) elif strategy == 'column': replacement_df = pd.DataFrame({col: df[value] for col in columns}) df[columns] = df[columns].fillna(replacement_df) elif value is not None: df[columns] = df[columns].fillna(value) else: raise Exception('Require a valid strategy or value') for col in columns: col_type = get_column_type(col, action) df[col] = convert_col_type(df[col], col_type) return df def max(df, action, **kwargs): return __agg(df, action, 'max') def median(df, action, **kwargs): return __agg(df, action, 'median') def min(df, 
action, **kwargs): return __agg(df, action, 'min') def remove_column(df, action, **kwargs): cols = action['action_arguments'] original_columns = df.columns drop_columns = [col for col in cols if col in original_columns] return df.drop(columns=drop_columns) def last(df, action, **kwargs): return __agg(df, action, 'last') def select(df, action, **kwargs): return df[action['action_arguments']] def shift_down(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] action_options = action.get('action_options', {}) groupby_columns = action_options.get('groupby_columns') periods = action_options.get('periods', 1) if groupby_columns is not None: df[output_col] = df.groupby(groupby_columns)[action['action_arguments'][0]].shift(periods) else: df[output_col] = df[action['action_arguments'][0]].shift(periods) return df def shift_up(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].shift(-1) return df def sum(df, action, **kwargs): return __agg(df, action, 'sum') def __agg(df, action, agg_method): if action['action_options'].get('groupby_columns'): return __groupby_agg(df, action, agg_method) else: output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].agg(agg_method) return df def __column_mapping(action): return dict(zip(action['action_arguments'], [o['uuid'] for o in action['outputs']])) # Filter by timestamp_feature_a - window <= timestamp_feature_b <= timestamp_feature_a def __filter_df_with_time_window(df, action): action_options = action['action_options'] time_window_keys = ['timestamp_feature_a', 'timestamp_feature_b', 'window'] if all(k in action_options for k in time_window_keys): window_in_seconds = action_options['window'] df_time_diff = \ (pd.to_datetime(df[action_options['timestamp_feature_a']], utc=True) - \ pd.to_datetime(df[action_options['timestamp_feature_b']], utc=True)).dt.total_seconds() if window_in_seconds > 0: df_time_diff_filtered = \ df_time_diff[(df_time_diff <= window_in_seconds) & (df_time_diff >= 0)] else: df_time_diff_filtered = \ df_time_diff[(df_time_diff >= window_in_seconds) & (df_time_diff <= 0)] df_filtered = df.loc[df_time_diff_filtered.index] time_window = get_time_window_str(window_in_seconds) else: df_filtered = df time_window = None return df_filtered, time_window def __groupby_agg(df, action, agg_method): df_filtered, _ = __filter_df_with_time_window(df, action) action_code = action.get('action_code') if action_code is not None and action_code != '': df_filtered = query_with_action_code(df_filtered, action_code, { 'original_df': df_filtered, }) action_options = action['action_options'] df_agg = df_filtered.groupby( action_options['groupby_columns'], )[action['action_arguments']].agg(agg_method) return df.merge( df_agg.rename(columns=__column_mapping(action)), on=action_options['groupby_columns'], how='left', ) src/data_cleaner/transformer_actions/base.py METASEP from data_cleaner.transformer_actions import column, row from data_cleaner.transformer_actions.constants import ActionType, Axis, VariableType from data_cleaner.transformer_actions.helpers import drop_na from data_cleaner.transformer_actions.variable_replacer import ( interpolate, replace_true_false, ) # from pipelines.column_type_pipelines import COLUMN_TYPE_PIPELINE_MAPPING import json COLUMN_TYPE_PIPELINE_MAPPING = {} FUNCTION_MAPPING = { Axis.COLUMN: { ActionType.ADD: column.add_column, ActionType.AVERAGE: column.average, ActionType.COUNT: column.count, ActionType.COUNT_DISTINCT: 
column.count_distinct, ActionType.DIFF: column.diff, # ActionType.EXPAND_COLUMN: column.expand_column, ActionType.FIRST: column.first, ActionType.IMPUTE: column.impute, ActionType.LAST: column.last, ActionType.MAX: column.max, ActionType.MEDIAN: column.median, ActionType.MIN: column.min, ActionType.REMOVE: column.remove_column, ActionType.SELECT: column.select, ActionType.SHIFT_DOWN: column.shift_down, ActionType.SHIFT_UP: column.shift_up, ActionType.SUM: column.sum, }, Axis.ROW: { ActionType.DROP_DUPLICATE: row.drop_duplicates, # ActionType.EXPLODE: row.explode, ActionType.FILTER: row.filter_rows, ActionType.SORT: row.sort_rows, }, } class BaseAction(): def __init__(self, action): self.action = action self.columns_by_type = {} for variable_data in self.action.get('action_variables', {}).values(): if not variable_data: continue feature = variable_data.get(VariableType.FEATURE) if not feature: continue column_type = feature.get('column_type') if not self.columns_by_type.get(column_type): self.columns_by_type[column_type] = [] self.columns_by_type[column_type].append(feature['uuid']) @property def action_type(self): return self.action['action_type'] @property def axis(self): return self.action['axis'] def execute(self, df, **kwargs): self.hydrate_action() self.action['action_code'] = replace_true_false(self.action['action_code']) if df.empty: return df if self.action_type in [ActionType.FILTER, ActionType.ADD]: df_transformed = self.transform(df) else: df_transformed = df if self.action_type == ActionType.GROUP: df_output = self.groupby(df, self.action) elif self.action_type == ActionType.JOIN: df_to_join = kwargs.get('df_to_join') df_output = self.join(df, df_to_join, self.action) else: column_types = {} for column_type, cols in self.columns_by_type.items(): for col in cols: column_types[col] = column_type df_output = FUNCTION_MAPPING[self.axis][self.action_type]( df_transformed, self.action, column_types=column_types, original_df=df, ) if self.action_type == ActionType.FILTER: return df.loc[df_output.index][df_output.columns] elif self.action_type == ActionType.ADD: output_cols = [f['uuid'] for f in self.action['outputs']] df[output_cols] = df_output[output_cols] return df else: return df_output def groupby(self, df, action): def __transform_partition(pdf, actions): for action in actions: pdf = BaseAction(action).execute(pdf) return pdf groupby_columns = action['action_arguments'] return df.groupby(groupby_columns).apply(lambda x: __transform_partition(x, action['child_actions'])) def hydrate_action(self): for k, v in self.action['action_variables'].items(): """ k: 1, 1_1 v: { 'type': 'feature', 'id': 1, 'feature': { 'uuid': 'mage', }, } """ if not v: continue if self.action.get('action_code'): self.action['action_code'] = interpolate(self.action['action_code'], k, v) if self.action.get('action_arguments'): self.action['action_arguments'] = [interpolate( args_text, k, v, ) for args_text in self.action['action_arguments']] if self.action.get('action_options'): action_options_json = json.dumps(self.action['action_options']) self.action['action_options'] = json.loads(interpolate(action_options_json, k, v)) def join(self, df, df_to_join, action): action_options = action['action_options'] left_on = action_options['left_on'] right_on = action_options['right_on'] for i in range(len(left_on)): col1, col2 = left_on[i], right_on[i] if df[col1].dtype != df_to_join[col2].dtype: df[col1] = drop_na(df[col1]).astype(str) df_to_join[col2] = drop_na(df_to_join[col2]).astype(str) if action.get('outputs') is 
not None: feature_rename_mapping = { f['source_feature']['uuid']:f['uuid'] for f in action['outputs'] if f.get('source_feature') is not None } df_to_join_renamed = df_to_join.rename(columns=feature_rename_mapping) right_on = [feature_rename_mapping.get(key, key) for key in right_on] else: df_to_join_renamed = df_to_join how = action_options.get('how', 'left') df_merged = df.merge(df_to_join_renamed, left_on=left_on, right_on=right_on, how=how) drop_columns = action_options.get('drop_columns', []) rename_columns = action_options.get('rename_columns', {}) return df_merged.drop(columns=drop_columns).rename(columns=rename_columns) def transform(self, df): df_copy = df.copy() current_columns = df_copy.columns for column_type, original_columns in self.columns_by_type.items(): cols = [col for col in original_columns if col in current_columns] if len(cols) == 0: continue build_pipeline = COLUMN_TYPE_PIPELINE_MAPPING.get(column_type) if not build_pipeline: continue df_copy[cols] = build_pipeline().fit_transform(df_copy[cols]) return df_copy src/data_cleaner/transformer_actions/action_code.py METASEP from data_cleaner.transformer_actions.constants import Operator import re ACTION_CODE_CONDITION_PATTERN = re.compile( r'([^\s()]+) ([!=<>]+|(?:contains)|(?:not contains)) ([^\s()]+)' ) ORIGINAL_COLUMN_PREFIX = 'orig_' TRANSFORMED_COLUMN_PREFIX = 'tf_' def __query_mutate_null_type(match, dtype): condition = [''] column_name, operator, _ = match.groups() column_name = f'{ORIGINAL_COLUMN_PREFIX}{column_name}' if operator == '==': condition.append(f'({column_name}.isna()') if dtype == bool: condition.append(f' | {column_name} == \'\'') elif dtype == str: condition.append(f' | {column_name}.str.len() == 0') condition.append(f')') else: condition.append(f'({column_name}.notna()') if dtype == bool: condition.append(f' & {column_name} != \'\'') elif dtype == str: condition.append(f' & {column_name}.str.len() >= 1') condition.append(f')') return ''.join(condition) def __query_mutate_contains_op(match): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' value = value.strip('\'').strip('\"') if operator == Operator.CONTAINS: condition = f'({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' else: condition = f'~({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' return condition def __query_mutate_default_case(match, column_set): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' if value in column_set: # if comparison is with another column, prefix value with column identifier value = f'{TRANSFORMED_COLUMN_PREFIX}{value}' return f'{column_name} {operator} {value}' def __get_column_type(df, cache, column_name): dtype = cache.get(column_name, None) if dtype is None: dropped_na = df[column_name].dropna() dropped_na = dropped_na[~dropped_na.isin([''])] dtype = type(dropped_na.iloc[0]) if len(dropped_na.index) >= 1 else object cache[column_name] = dtype return dtype def query_with_action_code(df, action_code, kwargs): transformed_types, original_types = {}, {} original_df, original_merged = kwargs.get('original_df', None), False reconstructed_code = [] queried_df = df.copy().add_prefix(TRANSFORMED_COLUMN_PREFIX) column_set = set(df.columns) prev_end = 0 for match in ACTION_CODE_CONDITION_PATTERN.finditer(action_code): column_name, operator, value = match.groups() reconstructed_code.append(action_code[prev_end: match.start()]) prev_end = match.end() if operator == Operator.CONTAINS or 
operator == Operator.NOT_CONTAINS: transformed_dtype = __get_column_type(df, transformed_types, column_name) if transformed_dtype != str: raise TypeError( f'\'{operator}\' can only be used on string columns, {transformed_dtype}' ) reconstructed_code.append(__query_mutate_contains_op(match)) elif (operator == Operator.EQUALS or operator == Operator.NOT_EQUALS) and value == 'null': if original_df is None: raise Exception( 'Null value queries require original dataframe as keyword argument' ) elif not original_merged: queried_df = queried_df.join(original_df.add_prefix(ORIGINAL_COLUMN_PREFIX)) original_merged = True original_dtype = __get_column_type(original_df, original_types, column_name) reconstructed_code.append(__query_mutate_null_type(match, original_dtype)) else: reconstructed_code.append(__query_mutate_default_case(match, column_set)) reconstructed_code.append(action_code[prev_end:]) action_code = ''.join(reconstructed_code) queried_df = queried_df.query(action_code).rename( lambda x: x[len(TRANSFORMED_COLUMN_PREFIX):], axis='columns' ) return queried_df[df.columns] src/data_cleaner/transformer_actions/__init__.py METASEP src/data_cleaner/statistics/calculator.py METASEP from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.column_type_detector import ( DATETIME, NUMBER, NUMBER_TYPES, NUMBER_WITH_DECIMALS, ) import math import numpy as np import pandas as pd import traceback VALUE_COUNT_LIMIT = 255 def increment(metric, tags): pass class timer(object): """ with timer('metric.metric', tags={ 'key': 'value' }): function() """ def __init__(self, metric, tags={}): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class StatisticsCalculator(): def __init__( self, # s3_client, # object_key_prefix, # feature_set_version, column_types, **kwargs, ): self.column_types = column_types @property def data_tags(self): return dict() def process(self, df): return self.calculate_statistics_overview(df) def calculate_statistics_overview(self, df): increment( 'lambda.transformer_actions.calculate_statistics_overview.start', self.data_tags, ) with timer( 'lambda.transformer_actions.calculate_statistics_overview.time', self.data_tags): data = dict(count=len(df.index)) arr_args_1 = [df[col] for col in df.columns], arr_args_2 = [col for col in df.columns], dicts = run_parallel(self.statistics_overview, arr_args_1, arr_args_2) for d in dicts: data.update(d) # object_key = s3_paths.path_statistics_overview(self.object_key_prefix) # s3_data.upload_json_sorted(self.s3_client, object_key, data) increment( 'lambda.transformer_actions.calculate_statistics_overview.success', self.data_tags, ) return data def statistics_overview(self, series, col): try: return self.__statistics_overview(series, col) except Exception as err: increment( 'lambda.transformer_actions.calculate_statistics_overview.column.failed', merge_dict(self.data_tags, { 'col': col, 'error': err.__class__.__name__, }), ) traceback.print_exc() return {} def __statistics_overview(self, series, col): # The following regex based replace has high overheads # series = series.replace(r'^\s*$', np.nan, regex=True) series_cleaned = series.map(lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan) df_value_counts = series_cleaned.value_counts(dropna=False) df = df_value_counts.reset_index() df.columns = [col, 'count'] df_top_value_counts = df if df.shape[0] > VALUE_COUNT_LIMIT: df_top_value_counts = df.head(VALUE_COUNT_LIMIT) # TODO: 
remove duplicate data for distinct values # object_key_distinct_values = s3_paths.path_distinct_values_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_distinct_values, columns=[col]) # object_key_statistics = s3_paths.path_statistics_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_statistics) # features = self.feature_set_version['features'] # feature = find(lambda x: x['uuid'] == col, features) # if feature and feature.get('transformed'): # return {} column_type = self.column_types.get(col) series_non_null = series_cleaned.dropna() if column_type == NUMBER: series_non_null = series_non_null.astype(float).astype(int) elif column_type == NUMBER_WITH_DECIMALS: series_non_null = series_non_null.astype(float) count_unique = len(df_value_counts.index) data = { f'{col}/count': series_non_null.size, f'{col}/count_distinct': count_unique - 1 if np.nan in df_value_counts else count_unique, f'{col}/null_value_rate': 0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, } if len(series_non_null) == 0: return data dates = None if column_type in NUMBER_TYPES: data[f'{col}/average'] = series_non_null.sum() / len(series_non_null) data[f'{col}/max'] = series_non_null.max() data[f'{col}/median'] = series_non_null.quantile(0.5) data[f'{col}/min'] = series_non_null.min() data[f'{col}/sum'] = series_non_null.sum() elif column_type == DATETIME: dates = pd.to_datetime(series_non_null, utc=True, errors='coerce').dropna() data[f'{col}/max'] = dates.max().isoformat() data[f'{col}/median'] = dates.sort_values().iloc[math.floor(len(dates) / 2)].isoformat() data[f'{col}/min'] = dates.min().isoformat() if column_type not in NUMBER_TYPES: if dates is not None: value_counts = dates.value_counts() else: value_counts = series_non_null.value_counts() mode = value_counts.index[0] if column_type == DATETIME: mode = mode.isoformat() data[f'{col}/mode'] = mode return data src/data_cleaner/statistics/__init__.py METASEP src/data_cleaner/shared/utils.py METASEP from data_cleaner.column_type_detector import ( NUMBER, NUMBER_WITH_DECIMALS, ) import numpy as np def clean_series(series, column_type, dropna=True): series_cleaned = series.map( lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan, ) if dropna: series_cleaned = series_cleaned.dropna() if column_type == NUMBER: try: series_cleaned = series_cleaned.astype(float).astype(int) except ValueError: series_cleaned = series_cleaned.astype(float) elif column_type == NUMBER_WITH_DECIMALS: series_cleaned = series_cleaned.astype(float) return series_cleaned src/data_cleaner/shared/multi.py METASEP from concurrent.futures import ThreadPoolExecutor from threading import Thread MAX_WORKERS = 16 def start_thread(target, **kwargs): thread = Thread( target=target, kwargs=kwargs, ) thread.start() return thread def parallelize(func, arr): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, arr) def parallelize_multiple_args(func, arr_args): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *zip(*arr_args)) def run_parallel_threads(list_of_funcs_and_args_or_kwargs): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: for func, args in list_of_funcs_and_args_or_kwargs: pool.submit(func, *args) def run_parallel(func, arr_args_1, arr_args_2): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *arr_args_1, 
*arr_args_2) src/data_cleaner/shared/hash.py METASEP from functools import reduce import math import re def dig(obj_arg, arr_or_string): if type(arr_or_string) is str: arr_or_string = arr_or_string.split('.') arr = list(map(str.strip, arr_or_string)) def _build(obj, key): tup = re.split(r'\[(\d+)\]$', key) if len(tup) >= 2: key, index = filter(lambda x: x, tup) if key and index: return obj[key][int(index)] elif index: return obj[int(index)] elif obj: return obj.get(key) else: return obj return reduce(_build, arr, obj_arg) def flatten(input_data): final_data = {} for k1, v1 in input_data.items(): if type(v1) is dict: for k2, v2 in v1.items(): if type(v2) is dict: for k3, v3 in v2.items(): final_data[f'{k1}_{k2}_{k3}'] = v3 else: final_data[f'{k1}_{k2}'] = v2 else: final_data[k1] = v1 return final_data def ignore_keys(d, keys): d_keys = d.keys() d2 = d.copy() for key in keys: if key in d_keys: d2.pop(key) return d2 def ignore_keys_with_blank_values(d): d2 = d.copy() for key, value in d.items(): if not value: d2.pop(key) return d2 def extract(d, keys): def _build(obj, key): val = d.get(key, None) if val is not None: obj[key] = val return obj return reduce(_build, keys, {}) def extract_arrays(input_data): arr = [] for k, v in input_data.items(): if type(v) is list: arr.append(v) return arr def group_by(func, arr): def _build(obj, item): val = func(item) if not obj.get(val): obj[val] = [] obj[val].append(item) return obj return reduce(_build, arr, {}) def index_by(func, arr): obj = {} for item in arr: key = func(item) obj[key] = item return obj def merge_dict(a, b): c = a.copy() c.update(b) return c def replace_dict_nan_value(d): def _replace_nan_value(v): if type(v) == float and math.isnan(v): return None return v return {k: _replace_nan_value(v) for k, v in d.items()} src/data_cleaner/shared/array.py METASEP import random def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def difference(li1, li2): li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2] return li_dif def flatten(arr): return [item for sublist in arr for item in sublist] def find(condition, arr, map=None): try: return next(map(x) if map else x for x in arr if condition(x)) except StopIteration: return None def sample(arr): return arr[random.randrange(0, len(arr))] def subtract(arr1, arr2): return [i for i in arr1 if i not in arr2] src/data_cleaner/shared/__init__.py METASEP src/data_cleaner/pipelines/base.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from data_cleaner.transformer_actions.base import BaseAction DEFAULT_RULES = [ RemoveColumnsWithHighEmptyRate, RemoveColumnsWithSingleValue, ] class BasePipeline(): def __init__(self, actions=[]): self.actions = actions self.rules = DEFAULT_RULES def create_actions(self, df, column_types, statistics): all_suggestions = [] for rule in self.rules: suggestions = rule(df, column_types, statistics).evaluate() if suggestions: all_suggestions += suggestions self.actions = all_suggestions return all_suggestions def transform(self, df): if len(self.actions) == 0: print('Pipeline is empty.') return df df_transformed = df for action in self.actions: df_transformed = BaseAction(action['action_payload']).execute(df_transformed) return df_transformed src/data_cleaner/pipelines/__init__.py METASEP src/data_cleaner/cleaning_rules/unit_conversion.py METASEP 
src/data_cleaner/cleaning_rules/type_conversion.py METASEP src/data_cleaner/cleaning_rules/remove_outliers.py METASEP src/data_cleaner/cleaning_rules/remove_duplicate_rows.py METASEP src/data_cleaner/cleaning_rules/remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithSingleValue(BaseRule): # Check statistic [feature_uuid]/count_distinct def evaluate(self): columns_with_single_value = [] for c in self.df_columns: if f'{c}/count_distinct' not in self.statistics: continue feature_count_distinct = self.statistics[f'{c}/count_distinct'] if feature_count_distinct == 1: columns_with_single_value.append(c) suggestions = [] suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with single value', f'The following columns have single value in all rows: {columns_with_single_value}.'\ ' Suggest to remove them.', ActionType.REMOVE, action_arguments=columns_with_single_value, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithHighEmptyRate(BaseRule): MISSING_RATE_THRESHOLD = 0.8 def evaluate(self): columns_with_missing_values = [] columns_with_no_values = [] for c in self.df_columns: if self.statistics.get(f'{c}/count') == 0: columns_with_no_values.append(c) elif f'{c}/null_value_rate' in self.statistics: null_value_rate = self.statistics[f'{c}/null_value_rate'] if null_value_rate >= self.MISSING_RATE_THRESHOLD: columns_with_missing_values.append(c) suggestions = [] if len(columns_with_no_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with no values', f'The following columns have no values: {columns_with_no_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_no_values, axis=Axis.COLUMN, )) if len(columns_with_missing_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with high empty rate', f'The following columns have high empty rate: {columns_with_missing_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_missing_values, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_few_unique_values.py METASEP src/data_cleaner/cleaning_rules/remove_collinear_columns.py METASEP src/data_cleaner/cleaning_rules/reformat_values.py METASEP src/data_cleaner/cleaning_rules/impute_values.py METASEP src/data_cleaner/cleaning_rules/fix_encoding.py METASEP src/data_cleaner/cleaning_rules/base.py METASEP class BaseRule: def __init__(self, df, column_types, statistics): self.df = df self.df_columns = df.columns.tolist() self.column_types = column_types self.statistics = statistics def evaluate(self): """Evaluate data cleaning rule and generate suggested actions Returns ------- A list of suggested actions """ return [] def _build_transformer_action_suggestion( self, title, message, action_type, action_arguments=[], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ): return dict( title=title, message=message, action_payload=dict( action_type=action_type, action_arguments=action_arguments, action_code=action_code, action_options=action_options, 
action_variables=action_variables, axis=axis, outputs=outputs, ), ) src/data_cleaner/cleaning_rules/__init__.py METASEP src/data_cleaner/analysis/constants.py METASEP CHART_TYPE_BAR_HORIZONTAL = 'bar_horizontal' CHART_TYPE_LINE_CHART = 'line_chart' CHART_TYPE_HISTOGRAM = 'histogram' LABEL_TYPE_RANGE = 'range' DATA_KEY_CHARTS = 'charts' DATA_KEY_CORRELATION = 'correlations' DATA_KEY_OVERVIEW = 'overview' DATA_KEY_TIME_SERIES = 'time_series' DATA_KEYS = [ DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_OVERVIEW, DATA_KEY_TIME_SERIES, ] src/data_cleaner/analysis/charts.py METASEP from data_cleaner.analysis.constants import ( CHART_TYPE_BAR_HORIZONTAL, CHART_TYPE_LINE_CHART, CHART_TYPE_HISTOGRAM, DATA_KEY_TIME_SERIES, LABEL_TYPE_RANGE, ) from data_cleaner.shared.utils import clean_series from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) import dateutil.parser import math import numpy as np import pandas as pd DD_KEY = 'lambda.analysis_charts' BUCKETS = 40 TIME_SERIES_BUCKETS = 40 def increment(metric, tags={}): pass def build_buckets(min_value, max_value, max_buckets, column_type): diff = max_value - min_value total_interval = 1 + diff bucket_interval = total_interval / max_buckets number_of_buckets = max_buckets is_integer = False parts = str(diff).split('.') if len(parts) == 1: is_integer = True else: is_integer = int(parts[1]) == 0 if NUMBER == column_type and total_interval <= max_buckets and is_integer: number_of_buckets = int(total_interval) bucket_interval = 1 elif bucket_interval > 1: bucket_interval = math.ceil(bucket_interval) else: bucket_interval = round(bucket_interval * 100, 1) / 100 buckets = [] for i in range(number_of_buckets): min_v = min_value + (i * bucket_interval) max_v = min_value + ((i + 1) * bucket_interval) buckets.append(dict( max_value=max_v, min_value=min_v, values=[], )) return buckets, bucket_interval def build_histogram_data(col1, series, column_type): increment(f'{DD_KEY}.build_histogram_data.start', dict(feature_uuid=col1)) max_value = series.max() min_value = series.min() buckets, bucket_interval = build_buckets(min_value, max_value, BUCKETS, column_type) if bucket_interval == 0: return for value in series.values: index = math.floor((value - min_value) / bucket_interval) if value == max_value: index = len(buckets) - 1 buckets[index]['values'].append(value) x = [] y = [] for bucket in buckets: x.append(dict( max=bucket['max_value'], min=bucket['min_value'], )) y.append(dict(value=len(bucket['values']))) increment(f'{DD_KEY}.build_histogram_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_HISTOGRAM, x=x, x_metadata=dict( label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_correlation_data(df, col1, features): increment(f'{DD_KEY}.build_correlation_data.start', dict(feature_uuid=col1)) x = [] y = [] df_copy = df.copy() for feature in features: col2 = feature['uuid'] column_type = feature['column_type'] series = df_copy[col2] df_copy[col2] = clean_series(series, column_type, dropna=False) corr = df_copy.corr() for feature in features: col2 = feature['uuid'] if col1 != col2: value = corr[col1].get(col2, None) if value is not None: x.append(dict(label=col2)) y.append(dict(value=value)) increment(f'{DD_KEY}.build_correlation_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_BAR_HORIZONTAL, x=x, y=y, ) def build_time_series_data(df, feature, datetime_column, column_type): col1 = feature['uuid'] column_type = feature['column_type'] tags = 
dict( column_type=column_type, datetime_column=datetime_column, feature_uuid=col1, ) increment(f'{DD_KEY}.build_time_series_data.start', tags) # print(feature, datetime_column) datetimes = clean_series(df[datetime_column], DATETIME) if datetimes.size <= 1: return min_value_datetime = dateutil.parser.parse(datetimes.min()).timestamp() max_value_datetime = dateutil.parser.parse(datetimes.max()).timestamp() buckets, bucket_interval = build_buckets( min_value_datetime, max_value_datetime, TIME_SERIES_BUCKETS, column_type, ) x = [] y = [] df_copy = df.copy() df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] series = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )][col1] x.append(dict( max=max_value, min=min_value, )) series_cleaned = clean_series(series, column_type, dropna=False) df_value_counts = series_cleaned.value_counts(dropna=False) series_non_null = series_cleaned.dropna() count_unique = len(df_value_counts.index) y_data = dict( count=series_non_null.size, count_distinct=count_unique - 1 if np.nan in df_value_counts else count_unique, null_value_rate=0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, ) if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: y_data.update(dict( average=series_non_null.sum() / len(series_non_null), max=series_non_null.max(), median=series_non_null.quantile(0.5), min=series_non_null.min(), sum=series_non_null.sum(), )) elif column_type in [CATEGORY, CATEGORY_HIGH_CARDINALITY, TRUE_OR_FALSE]: value_counts = series_non_null.value_counts() if len(value_counts.index): value_counts_top = value_counts.sort_values(ascending=False).iloc[:12] mode = value_counts_top.index[0] y_data.update(dict( mode=mode, value_counts=value_counts_top.to_dict(), )) y.append(y_data) increment(f'{DD_KEY}.build_time_series_data.succeeded', tags) return dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_overview_data(df, datetime_features): increment(f'{DD_KEY}.build_overview_data.start') time_series = [] df_copy = df.copy() for feature in datetime_features: column_type = feature['column_type'] datetime_column = feature['uuid'] tags = dict(datetime_column=datetime_column) increment(f'{DD_KEY}.build_overview_time_series.start', tags) if clean_series(df_copy[datetime_column], DATETIME).size <= 1: continue df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) min_value1 = df_copy[datetime_column].min() max_value1 = df_copy[datetime_column].max() buckets, bucket_interval = build_buckets(min_value1, max_value1, TIME_SERIES_BUCKETS, column_type) x = [] y = [] for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] df_filtered = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )] x.append(dict( max=max_value, min=min_value, )) y.append(dict( count=len(df_filtered.index), )) time_series.append(dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, )) increment(f'{DD_KEY}.build_overview_time_series.succeeded', tags) increment(f'{DD_KEY}.build_overview_data.succeeded') return { DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/calculator.py METASEP from data_cleaner.analysis import 
charts from data_cleaner.analysis.constants import ( DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_TIME_SERIES, ) from data_cleaner.shared.utils import clean_series from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.transformer_actions import constants from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) DD_KEY = 'lambda.analysis_calculator' def increment(metric, tags={}): pass class AnalysisCalculator(): def __init__( self, df, column_types, **kwargs, ): self.df = df self.column_types = column_types self.features = [{'uuid': col, 'column_type': column_types.get(col)} for col in df.columns] def process(self, df): increment(f'{DD_KEY}.process.start', self.tags) df_columns = df.columns features_to_use = self.features datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] arr_args_1 = [df for _ in features_to_use], arr_args_2 = features_to_use, data_for_columns = [d for d in run_parallel(self.calculate_column, arr_args_1, arr_args_2)] overview = charts.build_overview_data( df, datetime_features_to_use, ) correlation_overview = [] for d in data_for_columns: corr = d.get(DATA_KEY_CORRELATION) if corr: correlation_overview.append({ 'feature': d['feature'], DATA_KEY_CORRELATION: corr, }) increment(f'{DD_KEY}.process.succeeded', self.tags) return data_for_columns, merge_dict(overview, { DATA_KEY_CORRELATION: correlation_overview, }) @property def features_by_uuid(self): data = {} for feature in self.features: data[feature['uuid']] = feature return data @property def datetime_features(self): return [f for f in self.features if f['column_type'] == DATETIME] @property def tags(self): return dict() def calculate_column(self, df, feature): df_columns = df.columns features_to_use = [f for f in self.features if f['uuid'] in df_columns] datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] col = feature['uuid'] column_type = feature['column_type'] tags = merge_dict(self.tags, dict(column_type=column_type, feature_uuid=col)) increment(f'{DD_KEY}.calculate_column.start', tags) series = df[col] series_cleaned = clean_series(series, column_type) chart_data = [] correlation = [] time_series = [] if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: histogram_data = charts.build_histogram_data(col, series_cleaned, column_type) if histogram_data: chart_data.append(histogram_data) correlation.append(charts.build_correlation_data(df, col, features_to_use)) if column_type in [ CATEGORY, CATEGORY_HIGH_CARDINALITY, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ]: time_series = [] for f in datetime_features_to_use: time_series_chart = charts.build_time_series_data(df, feature, f['uuid'], column_type) if time_series_chart: time_series.append(time_series_chart) increment(f'{DD_KEY}.calculate_column.succeeded', tags) return { 'feature': feature, DATA_KEY_CHARTS: chart_data, DATA_KEY_CORRELATION: correlation, DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/__init__.py METASEP src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_single_value.py METASEP from data_cleaner.tests.base_test import TestCase from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue import pandas as pd import numpy as np class RemoveColumnWithSingleValueTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01', True], [2, '2022-01-02', 
True], [3, np.NaN, True], [4, np.NaN, True], [5, np.NaN, True], ], columns=['id', 'deleted_at', 'is_active']) column_types = { 'id': 'number', 'deleted_at': 'datetime', 'is_active': 'true_or_false', } statistics = { 'id/count_distinct': 5, 'deleted_at/count_distinct': 2, 'is_active/count_distinct': 1, } result = RemoveColumnsWithSingleValue(df, column_types, statistics).evaluate() self.assertEqual(result, [ dict( title='Remove columns with single value', message=f'The following columns have single value in all rows: [\'is_active\'].'\ ' Suggest to remove them.', action_payload=dict( action_type='remove', action_arguments=['is_active'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_high_empty_rate.py METASEP from data_cleaner.tests.base_test import TestCase from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate import numpy as np import pandas as pd class RemoveColumnWithHighMissingRateTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01'], [2, np.NaN], [3, np.NaN], [4, np.NaN], [5, np.NaN], ], columns=['id', 'deleted_at']) column_types = { 'id': 'number', 'deleted_at': 'datetime', } statistics = { 'id/null_value_rate': 0, 'deleted_at/null_value_rate': 0.8, } result = RemoveColumnsWithHighEmptyRate( df, column_types, statistics, ).evaluate() self.assertEqual(result, [ dict( title='Remove columns with high empty rate', message='The following columns have high empty rate: [\'deleted_at\'].'\ ' Removing them may increase your data quality.', action_payload=dict( action_type='remove', action_arguments=['deleted_at'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) src/data_cleaner/tests/cleaning_rules/__init__.py METASEP src/data_cleaner/data_cleaner.py METASEP from data_cleaner import column_type_detector from data_cleaner.analysis.calculator import AnalysisCalculator from data_cleaner.pipelines.base import BasePipeline from data_cleaner.shared.hash import merge_dict from data_cleaner.statistics.calculator import StatisticsCalculator def clean(df): cleaner = DataCleaner() return cleaner.clean(df) class DataCleaner(): def analyze(self, df): """ Analyze a dataframe 1. Detect column types 2. Calculate statistics 3. 
Calculate analysis """ column_types = column_type_detector.infer_column_types(df) statistics = StatisticsCalculator(column_types).process(df) analysis = AnalysisCalculator(df, column_types).process(df) return dict( analysis=analysis, column_types=column_types, statistics=statistics, ) def clean(self, df): df_stats = self.analyze(df) pipeline = BasePipeline() suggested_actions = pipeline.create_actions( df, df_stats['column_types'], df_stats['statistics'], ) df_cleaned = pipeline.transform(df) return merge_dict(df_stats, dict( df_cleaned=df_cleaned, suggested_actions=suggested_actions, )) src/data_cleaner/column_type_detector.py METASEP from data_cleaner.shared.array import subtract import numpy as np import re import warnings DATETIME_MATCHES_THRESHOLD = 0.5 MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES = 40 CATEGORY = 'category' CATEGORY_HIGH_CARDINALITY = 'category_high_cardinality' DATETIME = 'datetime' EMAIL = 'email' NUMBER = 'number' NUMBER_WITH_DECIMALS = 'number_with_decimals' PHONE_NUMBER = 'phone_number' TEXT = 'text' TRUE_OR_FALSE = 'true_or_false' ZIP_CODE = 'zip_code' NUMBER_TYPES = [NUMBER, NUMBER_WITH_DECIMALS] STRING_TYPES = [EMAIL, PHONE_NUMBER, TEXT, ZIP_CODE] COLUMN_TYPES = [ CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, EMAIL, NUMBER, NUMBER_WITH_DECIMALS, PHONE_NUMBER, TEXT, TRUE_OR_FALSE, ZIP_CODE, ] REGEX_DATETIME_PATTERN = r'^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}$|^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}[Tt ]{1}[\d]{1,2}:[\d]{1,2}[:]{0,1}[\d]{1,2}[\.]{0,1}[\d]*|^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$|^\d{1,4}[-\/]{1}\d{1,2}[-\/]{1}\d{1,4}$|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})[\s,]+(\d{2,4})' REGEX_EMAIL_PATTERN = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$" REGEX_EMAIL = re.compile(REGEX_EMAIL_PATTERN) REGEX_INTEGER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+$' REGEX_INTEGER = re.compile(REGEX_INTEGER_PATTERN) REGEX_NUMBER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+\.[0-9]*%{0,1}$|^[\-]{0,1}[\$]{0,1}[0-9,]+%{0,1}$' REGEX_NUMBER = re.compile(REGEX_NUMBER_PATTERN) REGEX_PHONE_NUMBER_PATTERN = r'^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. 
]*(\d{4})(?: *x(\d+))?\s*$' REGEX_PHONE_NUMBER = re.compile(REGEX_PHONE_NUMBER_PATTERN) REGEX_ZIP_CODE_PATTERN = r'^\d{3,5}(?:[-\s]\d{4})?$' REGEX_ZIP_CODE = re.compile(REGEX_ZIP_CODE_PATTERN) def infer_column_types(df, **kwargs): binary_feature_names = [] category_feature_names = [] datetime_feature_names = [] email_features = [] float_feature_names = [] integer_feature_names = [] non_number_feature_names = [] phone_number_feature_names = [] text_feature_names = [] zip_code_feature_names = [] for idx, col_type in enumerate(df.dtypes): col_name = df.columns[idx] if 'datetime64' in str(col_type): datetime_feature_names.append(col_name) elif col_type == 'object': df_sub = df[col_name].copy() df_sub = df_sub.replace('^\s+$', np.nan, regex=True) df_sub = df_sub.dropna() df_sub = df_sub.apply(lambda x: x.strip() if type(x) is str else x) if df_sub.empty: non_number_feature_names.append(col_name) else: first_item = df_sub.iloc[0] if type(first_item) is list: text_feature_names.append(col_name) elif type(first_item) is bool or type(first_item) is np.bool_: if len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: category_feature_names.append(col_name) elif len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: df_sub = df_sub.astype(str) incorrect_emails = len( df_sub[df_sub.str.contains(REGEX_EMAIL) == False].index, ) warnings.filterwarnings('ignore', 'This pattern has match groups') incorrect_phone_numbers = len( df_sub[df_sub.str.contains(REGEX_PHONE_NUMBER) == False].index, ) incorrect_zip_codes = len( df_sub[df_sub.str.contains(REGEX_ZIP_CODE) == False].index, ) if all(df_sub.str.contains(REGEX_INTEGER)): integer_feature_names.append(col_name) elif all(df_sub.str.contains(REGEX_NUMBER)): float_feature_names.append(col_name) elif incorrect_emails / len(df_sub.index) <= 0.99: email_features.append(col_name) elif incorrect_phone_numbers / len(df_sub.index) <= 0.99: phone_number_feature_names.append(col_name) elif incorrect_zip_codes / len(df_sub.index) <= 0.99: zip_code_feature_names.append(col_name) else: non_number_feature_names.append(col_name) elif col_type == 'bool': binary_feature_names.append(col_name) elif np.issubdtype(col_type, np.floating): float_feature_names.append(col_name) elif np.issubdtype(col_type, np.integer): df_sub = df[col_name].copy() df_sub = df_sub.dropna() if df_sub.min() >= 100 and df_sub.max() <= 99999 and 'zip' in col_name.lower(): zip_code_feature_names.append(col_name) else: integer_feature_names.append(col_name) number_feature_names = float_feature_names + integer_feature_names binary_feature_names += \ [col for col in number_feature_names if df[col].nunique(dropna=False) == 2] binary_feature_names += \ [col for col in non_number_feature_names if df[col].nunique(dropna=False) == 2] float_feature_names = [col for col in float_feature_names if col not in binary_feature_names] integer_feature_names = \ [col for col in integer_feature_names if col not in binary_feature_names] for col_name in subtract(non_number_feature_names, binary_feature_names): df_drop_na = df[col_name].dropna() if df_drop_na.empty: text_feature_names.append(col_name) else: matches = df_drop_na.astype(str).str.contains(REGEX_DATETIME_PATTERN) matches = matches.where(matches == True).dropna() if type(df_drop_na.iloc[0]) is list: text_feature_names.append(col_name) elif len(df_drop_na[matches.index]) / len(df_drop_na) >= DATETIME_MATCHES_THRESHOLD: datetime_feature_names.append(col_name) elif df_drop_na.nunique() / len(df_drop_na) >= 0.8: 
text_feature_names.append(col_name) else: word_count, _ = \ df[col_name].dropna().map(lambda x: (len(str(x).split(' ')), str(x))).max() if word_count > MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES: text_feature_names.append(col_name) else: category_feature_names.append(col_name) low_cardinality_category_feature_names = \ [col for col in category_feature_names if df[col].nunique() <= kwargs.get( 'category_cardinality_threshold', 255, )] high_cardinality_category_feature_names = \ [col for col in category_feature_names if col not in low_cardinality_category_feature_names] column_types = {} array_types_mapping = { CATEGORY: low_cardinality_category_feature_names, CATEGORY_HIGH_CARDINALITY: high_cardinality_category_feature_names, DATETIME: datetime_feature_names, EMAIL: email_features, NUMBER: integer_feature_names, NUMBER_WITH_DECIMALS: float_feature_names, PHONE_NUMBER: phone_number_feature_names, TEXT: text_feature_names, TRUE_OR_FALSE: binary_feature_names, ZIP_CODE: zip_code_feature_names, } for col_type, arr in array_types_mapping.items(): for col in arr: column_types[col] = col_type return column_types src/data_cleaner/__init__.py METASEP src/data_cleaner/tests/base_test.py METASEP import unittest class TestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass src/data_cleaner/tests/__init__.py METASEP src/data_cleaner/tests/transformer_actions/test_base.py METASEP
[ { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n 
action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import 
TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def 
test_hydrate_action_when_adding_column(self):", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n 
timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_hydrate_action_when_adding_column(self):\n base_action = BaseAction(merge_dict(TEST_ACTION, dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def 
test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n 
axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_hydrate_action_when_adding_column(self):\n base_action = BaseAction(merge_dict(TEST_ACTION, dict(\n action_type='add',\n axis='column',\n )))\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_type'] = 'add'\n hydrated_action['axis'] = 'column'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_join(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A'],\n ['b', 'Store B'],\n ], columns=[\n 'store_name',\n 'description',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A'],\n ['a', '2020-01-01', 1000, 'Store A'],\n ['b', '2020-01-04', 990, 'Store B'],\n ['a', '2020-01-02', 1100, 'Store A'],\n ['b', '2020-01-03', 1200, 'Store B'],\n ['c', '2020-01-07', 1250, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description',\n ])\n\n def test_join_rename_column(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store_name',\n 'description',\n 'date',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n 
left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n outputs=[\n {\n 'source_feature': {\n 'uuid': 'store_name',\n },\n 'uuid': 'store_name',\n },\n {\n 'source_feature': {\n 'uuid': 'description',\n },\n 'uuid': 'description',\n },\n {\n 'source_feature': {\n 'uuid': 'date',\n },\n 'uuid': 'date_1',\n }\n ]\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A', '2020-02-01'],\n ['a', '2020-01-01', 1000, 'Store A', '2020-02-01'],\n ['b', '2020-01-04', 990, 'Store B', '2020-02-02'],\n ['a', '2020-01-02', 1100, 'Store A', '2020-02-01'],\n ['b', '2020-01-03', 1200, 'Store B', '2020-02-02'],\n ['c', '2020-01-07', 1250, np.NaN, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description', 'date_1',\n ])\n\n def test_join_rename_join_key(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store',\n 'description',\n 'date',\n ])", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n 
action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_hydrate_action_when_adding_column(self):\n base_action = BaseAction(merge_dict(TEST_ACTION, dict(\n action_type='add',\n axis='column',\n )))\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_type'] = 'add'\n hydrated_action['axis'] = 'column'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_join(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A'],\n ['b', 'Store B'],\n ], columns=[\n 'store_name',\n 'description',\n ])\n base_action = BaseAction(dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} 
>= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom 
data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def 
test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom 
data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:\n base_action.execute(df.drop(columns=['deposited']))\n except Exception:\n raised = True\n\n self.assertFalse(raised)\n\n def test_groupby(self):\n df = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n base_action = BaseAction(dict(\n action_type='group',\n action_arguments=['store'],\n action_code='',\n action_variables=dict(),\n child_actions=[\n dict(\n action_type='sort',\n axis='row',\n action_arguments=['date'],\n action_code='',\n action_variables=dict(),\n ),\n dict(\n action_type='diff',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='sold_diff')]\n ),\n dict(\n action_type='shift_down',\n action_arguments=['sold'],\n action_code='',\n action_variables=dict(),\n axis='column',\n outputs=[dict(uuid='prev_sold')]\n ),\n ],\n ))\n df_new = base_action.execute(df)\n df_new = df_new.fillna(0)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-01', 1000, 0, 0],\n ['a', '2020-01-02', 1100, 100, 1000],\n ['a', '2020-01-03', 1050, -50, 1100],\n ['b', '2020-01-03', 1200, 0, 0],\n ['b', '2020-01-04', 990, -210, 1200],\n ])\n\n def test_hydrate_action(self):\n base_action = BaseAction(TEST_ACTION)\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_hydrate_action_when_adding_column(self):\n base_action = 
BaseAction(merge_dict(TEST_ACTION, dict(\n action_type='add',\n axis='column',\n )))\n base_action.hydrate_action()\n\n hydrated_action = TEST_ACTION.copy()\n hydrated_action['action_code'] = \\\n 'omni.deposited == True and (omni.fund == \"The Quant\" or omni.fund == \"Yield\")'\n hydrated_action['action_type'] = 'add'\n hydrated_action['axis'] = 'column'\n hydrated_action['action_arguments'] = [\n 'omni.deposited',\n 'magic.spell',\n ]\n hydrated_action['action_options'] = dict(\n condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000',\n default=0,\n timestamp_feature_a='omni.fund',\n timestamp_feature_b='omni.delivered_at',\n window=2592000,\n )\n\n self.assertEqual(base_action.action, hydrated_action)\n\n def test_join(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A'],\n ['b', 'Store B'],\n ], columns=[\n 'store_name',\n 'description',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A'],\n ['a', '2020-01-01', 1000, 'Store A'],\n ['b', '2020-01-04', 990, 'Store B'],\n ['a', '2020-01-02', 1100, 'Store A'],\n ['b', '2020-01-03', 1200, 'Store B'],\n ['c', '2020-01-07', 1250, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description',\n ])\n\n def test_join_rename_column(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store_name',\n 'description',\n 'date',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n outputs=[\n {\n 'source_feature': {\n 'uuid': 'store_name',\n },\n 'uuid': 'store_name',\n },\n {\n 'source_feature': {\n 'uuid': 'description',\n },\n 'uuid': 'description',\n },\n {\n 'source_feature': {\n 'uuid': 'date',\n },\n 'uuid': 'date_1',\n }\n ]\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A', '2020-02-01'],\n ['a', '2020-01-01', 1000, 'Store A', '2020-02-01'],\n ['b', '2020-01-04', 990, 'Store B', '2020-02-02'],\n ['a', '2020-01-02', 1100, 'Store A', '2020-02-01'],\n ['b', '2020-01-03', 1200, 'Store B', '2020-02-02'],\n ['c', '2020-01-07', 1250, np.NaN, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description', 'date_1',\n ])\n\n def test_join_rename_join_key(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', 
'2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store',\n 'description',\n 'date',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store'],\n ),\n action_variables=dict(),\n outputs=[\n {\n 'source_feature': {\n 'uuid': 'store',\n },\n 'uuid': 'store_1',\n },\n {\n 'source_feature': {\n 'uuid': 'description',\n },\n 'uuid': 'description',\n },\n {\n 'source_feature': {\n 'uuid': 'date',\n },\n 'uuid': 'date_1',\n }\n ]\n ))", "type": "common" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n # '%{3_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n df_new = base_action.execute(df)\n self.assertEqual(df_new.values.tolist(), [\n [False, 5.0],\n [False, '$6.0'],\n [None, '$7,000'],\n [False, '$4.0'],\n [None, 3.0],\n [True, 8000],\n ])\n\n def test_execute_with_no_columns_to_transform(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n '%{1_1}',\n ],\n action_type='remove',\n axis='column',\n ),\n ))\n\n raised = False\n try:", "type": "common" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.tests.transformer_actions.shared import TEST_ACTION\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.shared.hash import merge_dict\nimport numpy as np\nimport pandas as pd\n\n\ndef build_df():\n return pd.DataFrame([\n [2, False, 5.0],\n ['$3', False, '$6.0', 1],\n ['$4,000', None, '$7,000', 200],\n ['$3', False, '$4.0', 3],\n ['$4,000', None, 3.0, 4],\n [5, True, 8000, 5],\n ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index')\n\n\nclass BaseActionTests(TestCase):\n # def test_execute(self):\n # df = build_df()\n\n # base_action = BaseAction(merge_dict(\n # TEST_ACTION,\n # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'),\n # ))\n\n # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [\n # ['$3', False, '$6.0'],\n # ['$4,000', None, '$7,000'],\n # ])\n\n def test_execute_axis_column(self):\n df = build_df()\n\n base_action = BaseAction(merge_dict(\n TEST_ACTION,\n dict(\n action_arguments=[\n 
action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A'],\n ['a', '2020-01-01', 1000, 'Store A'],\n ['b', '2020-01-04', 990, 'Store B'],\n ['a', '2020-01-02', 1100, 'Store A'],\n ['b', '2020-01-03', 1200, 'Store B'],\n ['c', '2020-01-07', 1250, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description',\n ])\n\n def test_join_rename_column(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store_name',\n 'description',\n 'date',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n outputs=[\n {\n 'source_feature': {\n 'uuid': 'store_name',\n },\n 'uuid': 'store_name',\n },\n {\n 'source_feature': {\n 'uuid': 'description',\n },\n 'uuid': 'description',\n },\n {\n 'source_feature': {\n 'uuid': 'date',\n },\n 'uuid': 'date_1',\n }\n ]\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'Store A', '2020-02-01'],\n ['a', '2020-01-01', 1000, 'Store A', '2020-02-01'],\n ['b', '2020-01-04', 990, 'Store B', '2020-02-02'],\n ['a', '2020-01-02', 1100, 'Store A', '2020-02-01'],\n ['b', '2020-01-03', 1200, 'Store B', '2020-02-02'],\n ['c', '2020-01-07', 1250, np.NaN, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 'sold', 'store_description', 'date_1',\n ])\n\n def test_join_rename_join_key(self):\n df1 = pd.DataFrame([\n ['a', '2020-01-03', 1050],\n ['a', '2020-01-01', 1000],\n ['b', '2020-01-04', 990],\n ['a', '2020-01-02', 1100],\n ['b', '2020-01-03', 1200],\n ['c', '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['a', 'Store A', '2020-02-01'],\n ['b', 'Store B', '2020-02-02'],\n ], columns=[\n 'store',\n 'description',\n 'date',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store'],\n ),\n action_variables=dict(),\n outputs=[\n {\n 'source_feature': {\n 'uuid': 'store',\n },\n 'uuid': 'store_1',\n },\n {\n 'source_feature': {\n 'uuid': 'description',\n },\n 'uuid': 'description',\n },\n {\n 'source_feature': {\n 'uuid': 'date',\n },\n 'uuid': 'date_1',\n }\n ]\n ))\n df_new = base_action.execute(df1, df_to_join=df2)\n self.assertEqual(df_new.values.tolist(), [\n ['a', '2020-01-03', 1050, 'a', 'Store A', '2020-02-01'],\n ['a', '2020-01-01', 1000, 'a', 'Store A', '2020-02-01'],\n ['b', '2020-01-04', 990, 'b', 'Store B', '2020-02-02'],\n ['a', '2020-01-02', 1100, 'a', 'Store A', '2020-02-01'],\n ['b', '2020-01-03', 1200, 'b', 'Store B', '2020-02-02'],\n ['c', '2020-01-07', 1250, np.NaN, np.NaN, np.NaN],\n ])\n self.assertEqual(df_new.columns.to_list(), [\n 'store', 'date', 
'sold', 'store_1', 'description', 'date_1',\n ])\n\n def test_join_cast_to_str(self):\n df1 = pd.DataFrame([\n [1, '2020-01-03', 1050],\n [1, '2020-01-01', 1000],\n [2, '2020-01-04', 990],\n [1, '2020-01-02', 1100],\n [2, '2020-01-03', 1200],\n [3, '2020-01-07', 1250],\n ], columns=[\n 'store',\n 'date',\n 'sold',\n ])\n df2 = pd.DataFrame([\n ['1', 'Store A'],\n ['2', 'Store B'],\n ], columns=[\n 'store_name',\n 'description',\n ])\n base_action = BaseAction(dict(\n action_type='join',\n action_arguments=[100],\n action_code='',\n action_options=dict(\n left_on=['store'],\n right_on=['store_name'],\n drop_columns=['store_name'],\n rename_columns={'description': 'store_description'}\n ),\n action_variables=dict(),\n ))\n df_new = base_action.execute(df1, df_to_join=df2)", "type": "random" } ]
[ " df = build_df()", " base_action = BaseAction(TEST_ACTION)", " base_action.hydrate_action()", " base_action = BaseAction(merge_dict(TEST_ACTION, dict(", " action_type='add',", " axis='column',", " base_action = BaseAction(merge_dict(", " action_type='remove',", " base_action = BaseAction(dict(", " action_type='join',", " action_type='group',", " action_type='sort',", " axis='row',", " action_type='diff',", " action_type='shift_down',", " df_new = base_action.execute(df1, df_to_join=df2)", " base_action.execute(df.drop(columns=['deposited']))", " df_new = base_action.execute(df)", "", "import numpy as np", " def test_hydrate_action_when_adding_column(self):", " # ))", " window=2592000,", " 'store', 'date', 'sold', 'store_description', 'date_1',", " except Exception:", " TEST_ACTION,", " self.assertEqual(df_new.values.tolist(), [" ]
METASEP
20
mage-ai__mage-ai
mage-ai__mage-ai METASEP
cleaning/__init__.py METASEP
src/data_cleaner/transformer_actions/udf/substring.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class Substring(BaseUDF):
    def execute(self):
        start = self.options.get('start')
        stop = self.options.get('stop')
        if start is None and stop is None:
            raise Exception('Require at least one of `start` and `stop` parameters.')
        return self.df[self.arguments[0]].str.slice(start=start, stop=stop)
src/data_cleaner/transformer_actions/udf/string_split.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class StringSplit(BaseUDF):
    def execute(self):
        separator = self.options.get('separator')
        part_index = self.options.get('part_index')
        if separator is None or part_index is None:
            raise Exception('Require both `separator` and `part_index` parameters.')
        return self.df[self.arguments[0]].str.split(separator).str[part_index].str.strip()
src/data_cleaner/transformer_actions/udf/string_replace.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class StringReplace(BaseUDF):
    def execute(self):
        pattern = self.options.get('pattern')
        replacement = self.options.get('replacement')
        if not pattern and not replacement:
            raise Exception(f'Require both `pattern` and `replacement` parameters.')
        return self.df[self.arguments[0]].str.replace(pattern, replacement)
src/data_cleaner/transformer_actions/udf/multiply.py METASEP
from transformer_actions.udf.base import BaseUDF


class Multiply(BaseUDF):
    def execute(self):
        col1 = self.arguments[0]
        if len(self.arguments) > 1:
            col2 = self.arguments[1]
            return self.df[col1].astype(float) * self.df[col2].astype(float)
        elif self.options.get('value') is not None:
            return self.df[col1] * float(self.options['value'])
        raise Exception('Require second column or a value to multiply.')
src/data_cleaner/transformer_actions/udf/if_else.py METASEP
from data_cleaner.transformer_actions.action_code import query_with_action_code
from data_cleaner.transformer_actions.udf.base import BaseUDF


class IfElse(BaseUDF):
    def execute(self):
        df_copy = self.df.copy()
        true_index = query_with_action_code(df_copy, self.code, self.kwargs).index
        arg1_type = self.options.get('arg1_type', 'value')
        arg2_type = self.options.get('arg2_type', 'value')
        arg1 = self.arguments[0]
        if arg1_type == 'column':
            arg1 = df_copy[arg1]
        arg2 = self.arguments[1]
        if arg2_type == 'column':
            arg2 = df_copy[arg2]
        df_copy.loc[true_index, 'result'] = arg1
        df_copy['result'] = df_copy['result'].fillna(arg2)
        return df_copy['result']
src/data_cleaner/transformer_actions/udf/formatted_date.py METASEP
from transformer_actions.udf.base import BaseUDF

import pandas as pd


class FormattedDate(BaseUDF):
    def execute(self):
        return pd.to_datetime(
            self.df[self.arguments[0]],
        ).dt.strftime(self.options['format'])
src/data_cleaner/transformer_actions/udf/divide.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class Divide(BaseUDF):
    def execute(self):
        col1 = self.arguments[0]
        if len(self.arguments) > 1:
            col2 = self.arguments[1]
            return self.df[col1].astype(float) / self.df[col2].astype(float)
        elif self.options.get('value') is not None:
            return self.df[col1] / float(self.options['value'])
        raise Exception('Require second column or a value to divide.')
src/data_cleaner/transformer_actions/udf/distance_between.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF

import numpy as np

EARTH_RADIUS = 6371


class DistanceBetween(BaseUDF):
    def execute(self):
        def __haversine(lat1, lng1, lat2, lng2
= np.radians([lat1, lng1, lat2, lng2]) a = np.sin((lat2-lat1)/2.0)**2 + \ np.cos(lat1) * np.cos(lat2) * np.sin((lng2-lng1)/2.0)**2 return EARTH_RADIUS * 2 * np.arcsin(np.sqrt(a)) return __haversine( self.df[self.arguments[0]], self.df[self.arguments[1]], self.df[self.arguments[2]], self.df[self.arguments[3]], ) src/data_cleaner/transformer_actions/udf/difference.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class Difference(BaseUDF): def execute(self): col1 = self.arguments[0] column_type = self.options.get('column_type', self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) > 1: col2 = self.arguments[1] return self.__difference_between_columns( self.df[col1], self.df[col2], column_type=column_type, options=self.options, ) elif self.options.get('value') is not None: return self.__subtract_value( self.df[col1], self.options['value'], column_type=column_type, options=self.options, ) raise Exception('Require second column or a value to minus.') def __difference_between_columns(self, column1, column2, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return (pd.to_datetime(column1, utc=True) - pd.to_datetime(column2, utc=True)).dt.days return column1 - column2 def __subtract_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) - pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column - value src/data_cleaner/transformer_actions/udf/date_trunc.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class DateTrunc(BaseUDF): def execute(self): date_part = self.options['date_part'] date_column = self.arguments[0] df_copy = self.df.copy() df_copy[date_column] = pd.to_datetime(df_copy[date_column]) if date_part == 'week': return (df_copy[date_column] - df_copy[date_column].dt.weekday * np.timedelta64(1, 'D')).\ dt.strftime('%Y-%m-%d') raise Exception(f'Date part {date_part} is not supported.') src/data_cleaner/transformer_actions/udf/constant.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Constant(BaseUDF): def execute(self): return self.arguments[0] src/data_cleaner/transformer_actions/udf/base.py METASEP import importlib class BaseUDF(): def __init__(self, df, arguments=[], code=None, options={}, kwargs={}): self.df = df self.arguments = arguments self.code = code self.options = options self.kwargs = kwargs def execute(self): pass def execute_udf(udf_name, df, arguments, code, options, kwargs): udf_class = getattr( importlib.import_module(f'transformer_actions.udf.{udf_name}'), udf_name.title().replace('_', ''), ) return udf_class(df, arguments, code, options, kwargs).execute() src/data_cleaner/transformer_actions/udf/addition.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class Addition(BaseUDF): def execute(self): col1 = self.arguments[0] df_result = self.df[col1] column_type = self.options.get("column_type", self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) == 1 and 'value' not in self.options: raise Exception('Require second column or a value to add.') if len(self.arguments) > 1: for col in self.arguments[1:]: df_result = 
df_result + self.df[col] if self.options.get('value') is not None: df_result = self.__add_value( df_result, self.options['value'], column_type=column_type, options=self.options, ) return df_result def __add_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) + pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column + value src/data_cleaner/transformer_actions/udf/__init__.py METASEP src/server/hello.py METASEP from flask import Flask app = Flask(__name__) @app.route("/") def hello_world(): return "<p>Hello, World!</p>" src/data_cleaner/transformer_actions/variable_replacer.py METASEP from data_cleaner.transformer_actions.constants import VariableType import re def interpolate(text, key, variable_data): """ text: string to operate on key: key to search within text variable_data: dictionary containing data used to interpolate """ regex_replacement = key if variable_data['type'] == VariableType.FEATURE: regex_replacement = variable_data[VariableType.FEATURE]['uuid'] elif variable_data['type'] == VariableType.FEATURE_SET_VERSION: regex_replacement = \ variable_data[VariableType.FEATURE_SET_VERSION][VariableType.FEATURE_SET]['uuid'] regex_pattern = re.compile( '\%__BRACKETS_START__{}__BRACKETS_END__' .format(key) .replace('__BRACKETS_START__', '\{') .replace('__BRACKETS_END__', '\}') ) return re.sub(regex_pattern, regex_replacement, str(text)) def replace_true_false(action_code): regex_pattern_true = re.compile(' true') regex_pattern_false = re.compile(' false') return re.sub( regex_pattern_true, ' True', re.sub(regex_pattern_false, ' False', action_code), ) src/data_cleaner/transformer_actions/utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis def columns_to_remove(transformer_actions): arr = filter( lambda x: x['action_type'] == ActionType.REMOVE and x['axis'] == Axis.COLUMN, transformer_actions, ) columns = [] for transformer_action in arr: columns += transformer_action['action_arguments'] return columns src/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) src/data_cleaner/transformer_actions/row.py METASEP from 
data_cleaner.column_type_detector import NUMBER_TYPES from data_cleaner.transformer_actions.constants import VariableType from data_cleaner.transformer_actions.action_code import query_with_action_code import pandas as pd def drop_duplicates(df, action, **kwargs): keep = action.get('action_options', {}).get('keep', 'last') return df.drop_duplicates(subset=action['action_arguments'], keep=keep) def filter_rows(df, action, **kwargs): """ df: Pandas DataFrame action: TransformerAction serialized into a dictionary """ action_code = action['action_code'] return query_with_action_code(df, action_code, kwargs) def sort_rows(df, action, **kwargs): ascending = action.get('action_options', {}).get('ascending', True) ascendings = action.get('action_options', {}).get('ascendings', []) if len(ascendings) > 0: ascending = ascendings[0] feature_by_uuid = {} if action.get('action_variables'): for _, val in action['action_variables'].items(): feature = val.get('feature') if feature: feature_by_uuid[feature['uuid']] = feature na_indexes = None as_types = {} for idx, uuid in enumerate(action['action_arguments']): feature = feature_by_uuid.get(uuid) if feature and feature['column_type'] in NUMBER_TYPES: as_types[uuid] = float if idx == 0: na_indexes = df[(df[uuid].isnull()) | (df[uuid].astype(str).str.len() == 0)].index bad_df = None if na_indexes is not None: bad_df = df.index.isin(na_indexes) index = (df[~bad_df] if bad_df is not None else df).astype(as_types).sort_values( by=action['action_arguments'], ascending=ascendings if len(ascendings) > 0 else ascending, ).index df_final = df.loc[index] if bad_df is not None: if ascending: return pd.concat([ df.iloc[bad_df], df_final, ]) return pd.concat([ df_final, df.iloc[bad_df], ]) return df_final src/data_cleaner/transformer_actions/helpers.py METASEP from data_cleaner.column_type_detector import NUMBER, NUMBER_WITH_DECIMALS, TEXT from data_cleaner.transformer_actions.constants import ActionType, Operator, VariableType import numpy as np import re DAY_SECONDS = 86400 HOUR_SECONDS = 3600 def convert_col_type(df_col, col_type): if col_type == NUMBER: return df_col.replace(r'^\s*$', 0, regex=True).fillna(0).astype(np.int64) elif col_type == NUMBER_WITH_DECIMALS: return df_col.dropna().astype(float) elif col_type == TEXT: return df_col.dropna().astype(str) return df_col def convert_value_type(feature_uuid, action, value): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break if column_type == NUMBER: value = int(value) elif column_type == NUMBER_WITH_DECIMALS: value = float(value) return value def drop_na(df): return df.replace(r'^\s*$', np.nan, regex=True).dropna() def extract_join_feature_set_version_id(payload): if payload['action_type'] != ActionType.JOIN: return None join_feature_set_version_id = payload['action_arguments'][0] if type(join_feature_set_version_id) == str and \ join_feature_set_version_id.startswith('%{'): join_feature_set_version_id = next( v['id'] for v in payload['action_variables'].values() if v['type'] == VariableType.FEATURE_SET_VERSION ) return join_feature_set_version_id def get_column_type(feature_uuid, action): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break return column_type def 
get_time_window_str(window_in_seconds): if window_in_seconds is None: return None if window_in_seconds >= DAY_SECONDS: time_window = f'{int(window_in_seconds / DAY_SECONDS)}d' elif window_in_seconds >= HOUR_SECONDS: time_window = f'{int(window_in_seconds / HOUR_SECONDS)}h' else: time_window = f'{window_in_seconds}s' return time_window src/data_cleaner/transformer_actions/constants.py METASEP class ActionType(): ADD = 'add' AVERAGE = 'average' COUNT = 'count' COUNT_DISTINCT = 'count_distinct' DIFF = 'diff' DROP_DUPLICATE = 'drop_duplicate' EXPAND_COLUMN = 'expand_column' EXPLODE = 'explode' FILTER = 'filter' FIRST = 'first' GROUP = 'group' IMPUTE = 'impute' JOIN = 'join' LAST = 'last' LIMIT = 'limit' MAX = 'max' MEDIAN = 'median' MIN = 'min' MODE = 'mode' REMOVE = 'remove' SCALE = 'scale' SELECT = 'select' SHIFT_DOWN = 'shift_down' SHIFT_UP = 'shift_up' SORT = 'sort' SUM = 'sum' UNION = 'union' UPDATE_TYPE = 'update_type' UPDATE_VALUE = 'update_value' class Axis(): COLUMN = 'column' ROW = 'row' class VariableType(): FEATURE = 'feature' FEATURE_SET = 'feature_set' FEATURE_SET_VERSION = 'feature_set_version' class Operator(): CONTAINS = 'contains' NOT_CONTAINS = 'not contains' EQUALS = '==' NOT_EQUALS = '!=' GREATER_THAN = '>' GREATER_THAN_OR_EQUALS = '>=' LESS_THAN = '<' LESS_THAN_OR_EQUALS = '<=' src/data_cleaner/transformer_actions/column.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.helpers import ( convert_col_type, get_column_type, get_time_window_str, ) from data_cleaner.transformer_actions.udf.base import execute_udf import pandas as pd import numpy as np def add_column(df, action, **kwargs): col = action['outputs'][0]['uuid'] col_type = action['outputs'][0]['column_type'] udf = action['action_options'].get('udf') if udf is None: return df df_copy = df.copy() df_copy[col] = execute_udf( udf, df, action.get('action_arguments'), action.get('action_code'), action.get('action_options'), kwargs, ) df_copy[col] = convert_col_type(df_copy[col], col_type) return df_copy def average(df, action, **kwargs): return __agg(df, action, 'mean') def count(df, action, **kwargs): return __groupby_agg(df, action, 'count') def count_distinct(df, action, **kwargs): return __groupby_agg(df, action, 'nunique') def diff(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].diff() return df def first(df, action, **kwargs): return __agg(df, action, 'first') def impute(df, action, **kwargs): columns = action['action_arguments'] action_options = action['action_options'] strategy = action_options.get('strategy') value = action_options.get('value') empty_string_pattern = r'^\s*$' df[columns] = df[columns].replace(empty_string_pattern, np.nan, regex=True) if strategy == 'average': df[columns] = df[columns].fillna(df[columns].astype(float).mean(axis=0)) elif strategy == 'median': df[columns] = df[columns].fillna(df[columns].astype(float).median(axis=0)) elif strategy == 'column': replacement_df = pd.DataFrame({col: df[value] for col in columns}) df[columns] = df[columns].fillna(replacement_df) elif value is not None: df[columns] = df[columns].fillna(value) else: raise Exception('Require a valid strategy or value') for col in columns: col_type = get_column_type(col, action) df[col] = convert_col_type(df[col], col_type) return df def max(df, action, **kwargs): return __agg(df, action, 'max') def median(df, action, **kwargs): return __agg(df, action, 'median') def min(df, 
action, **kwargs): return __agg(df, action, 'min') def remove_column(df, action, **kwargs): cols = action['action_arguments'] original_columns = df.columns drop_columns = [col for col in cols if col in original_columns] return df.drop(columns=drop_columns) def last(df, action, **kwargs): return __agg(df, action, 'last') def select(df, action, **kwargs): return df[action['action_arguments']] def shift_down(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] action_options = action.get('action_options', {}) groupby_columns = action_options.get('groupby_columns') periods = action_options.get('periods', 1) if groupby_columns is not None: df[output_col] = df.groupby(groupby_columns)[action['action_arguments'][0]].shift(periods) else: df[output_col] = df[action['action_arguments'][0]].shift(periods) return df def shift_up(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].shift(-1) return df def sum(df, action, **kwargs): return __agg(df, action, 'sum') def __agg(df, action, agg_method): if action['action_options'].get('groupby_columns'): return __groupby_agg(df, action, agg_method) else: output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].agg(agg_method) return df def __column_mapping(action): return dict(zip(action['action_arguments'], [o['uuid'] for o in action['outputs']])) # Filter by timestamp_feature_a - window <= timestamp_feature_b <= timestamp_feature_a def __filter_df_with_time_window(df, action): action_options = action['action_options'] time_window_keys = ['timestamp_feature_a', 'timestamp_feature_b', 'window'] if all(k in action_options for k in time_window_keys): window_in_seconds = action_options['window'] df_time_diff = \ (pd.to_datetime(df[action_options['timestamp_feature_a']], utc=True) - \ pd.to_datetime(df[action_options['timestamp_feature_b']], utc=True)).dt.total_seconds() if window_in_seconds > 0: df_time_diff_filtered = \ df_time_diff[(df_time_diff <= window_in_seconds) & (df_time_diff >= 0)] else: df_time_diff_filtered = \ df_time_diff[(df_time_diff >= window_in_seconds) & (df_time_diff <= 0)] df_filtered = df.loc[df_time_diff_filtered.index] time_window = get_time_window_str(window_in_seconds) else: df_filtered = df time_window = None return df_filtered, time_window def __groupby_agg(df, action, agg_method): df_filtered, _ = __filter_df_with_time_window(df, action) action_code = action.get('action_code') if action_code is not None and action_code != '': df_filtered = query_with_action_code(df_filtered, action_code, { 'original_df': df_filtered, }) action_options = action['action_options'] df_agg = df_filtered.groupby( action_options['groupby_columns'], )[action['action_arguments']].agg(agg_method) return df.merge( df_agg.rename(columns=__column_mapping(action)), on=action_options['groupby_columns'], how='left', ) src/data_cleaner/transformer_actions/base.py METASEP from data_cleaner.transformer_actions import column, row from data_cleaner.transformer_actions.constants import ActionType, Axis, VariableType from data_cleaner.transformer_actions.helpers import drop_na from data_cleaner.transformer_actions.variable_replacer import ( interpolate, replace_true_false, ) # from pipelines.column_type_pipelines import COLUMN_TYPE_PIPELINE_MAPPING import json COLUMN_TYPE_PIPELINE_MAPPING = {} FUNCTION_MAPPING = { Axis.COLUMN: { ActionType.ADD: column.add_column, ActionType.AVERAGE: column.average, ActionType.COUNT: column.count, ActionType.COUNT_DISTINCT: 
column.count_distinct, ActionType.DIFF: column.diff, # ActionType.EXPAND_COLUMN: column.expand_column, ActionType.FIRST: column.first, ActionType.IMPUTE: column.impute, ActionType.LAST: column.last, ActionType.MAX: column.max, ActionType.MEDIAN: column.median, ActionType.MIN: column.min, ActionType.REMOVE: column.remove_column, ActionType.SELECT: column.select, ActionType.SHIFT_DOWN: column.shift_down, ActionType.SHIFT_UP: column.shift_up, ActionType.SUM: column.sum, }, Axis.ROW: { ActionType.DROP_DUPLICATE: row.drop_duplicates, # ActionType.EXPLODE: row.explode, ActionType.FILTER: row.filter_rows, ActionType.SORT: row.sort_rows, }, } class BaseAction(): def __init__(self, action): self.action = action self.columns_by_type = {} for variable_data in self.action.get('action_variables', {}).values(): if not variable_data: continue feature = variable_data.get(VariableType.FEATURE) if not feature: continue column_type = feature.get('column_type') if not self.columns_by_type.get(column_type): self.columns_by_type[column_type] = [] self.columns_by_type[column_type].append(feature['uuid']) @property def action_type(self): return self.action['action_type'] @property def axis(self): return self.action['axis'] def execute(self, df, **kwargs): self.hydrate_action() self.action['action_code'] = replace_true_false(self.action['action_code']) if df.empty: return df if self.action_type in [ActionType.FILTER, ActionType.ADD]: df_transformed = self.transform(df) else: df_transformed = df if self.action_type == ActionType.GROUP: df_output = self.groupby(df, self.action) elif self.action_type == ActionType.JOIN: df_to_join = kwargs.get('df_to_join') df_output = self.join(df, df_to_join, self.action) else: column_types = {} for column_type, cols in self.columns_by_type.items(): for col in cols: column_types[col] = column_type df_output = FUNCTION_MAPPING[self.axis][self.action_type]( df_transformed, self.action, column_types=column_types, original_df=df, ) if self.action_type == ActionType.FILTER: return df.loc[df_output.index][df_output.columns] elif self.action_type == ActionType.ADD: output_cols = [f['uuid'] for f in self.action['outputs']] df[output_cols] = df_output[output_cols] return df else: return df_output def groupby(self, df, action): def __transform_partition(pdf, actions): for action in actions: pdf = BaseAction(action).execute(pdf) return pdf groupby_columns = action['action_arguments'] return df.groupby(groupby_columns).apply(lambda x: __transform_partition(x, action['child_actions'])) def hydrate_action(self): for k, v in self.action['action_variables'].items(): """ k: 1, 1_1 v: { 'type': 'feature', 'id': 1, 'feature': { 'uuid': 'mage', }, } """ if not v: continue if self.action.get('action_code'): self.action['action_code'] = interpolate(self.action['action_code'], k, v) if self.action.get('action_arguments'): self.action['action_arguments'] = [interpolate( args_text, k, v, ) for args_text in self.action['action_arguments']] if self.action.get('action_options'): action_options_json = json.dumps(self.action['action_options']) self.action['action_options'] = json.loads(interpolate(action_options_json, k, v)) def join(self, df, df_to_join, action): action_options = action['action_options'] left_on = action_options['left_on'] right_on = action_options['right_on'] for i in range(len(left_on)): col1, col2 = left_on[i], right_on[i] if df[col1].dtype != df_to_join[col2].dtype: df[col1] = drop_na(df[col1]).astype(str) df_to_join[col2] = drop_na(df_to_join[col2]).astype(str) if action.get('outputs') is 
not None: feature_rename_mapping = { f['source_feature']['uuid']:f['uuid'] for f in action['outputs'] if f.get('source_feature') is not None } df_to_join_renamed = df_to_join.rename(columns=feature_rename_mapping) right_on = [feature_rename_mapping.get(key, key) for key in right_on] else: df_to_join_renamed = df_to_join how = action_options.get('how', 'left') df_merged = df.merge(df_to_join_renamed, left_on=left_on, right_on=right_on, how=how) drop_columns = action_options.get('drop_columns', []) rename_columns = action_options.get('rename_columns', {}) return df_merged.drop(columns=drop_columns).rename(columns=rename_columns) def transform(self, df): df_copy = df.copy() current_columns = df_copy.columns for column_type, original_columns in self.columns_by_type.items(): cols = [col for col in original_columns if col in current_columns] if len(cols) == 0: continue build_pipeline = COLUMN_TYPE_PIPELINE_MAPPING.get(column_type) if not build_pipeline: continue df_copy[cols] = build_pipeline().fit_transform(df_copy[cols]) return df_copy src/data_cleaner/transformer_actions/action_code.py METASEP from data_cleaner.transformer_actions.constants import Operator import re ACTION_CODE_CONDITION_PATTERN = re.compile( r'([^\s()]+) ([!=<>]+|(?:contains)|(?:not contains)) ([^\s()]+)' ) ORIGINAL_COLUMN_PREFIX = 'orig_' TRANSFORMED_COLUMN_PREFIX = 'tf_' def __query_mutate_null_type(match, dtype): condition = [''] column_name, operator, _ = match.groups() column_name = f'{ORIGINAL_COLUMN_PREFIX}{column_name}' if operator == '==': condition.append(f'({column_name}.isna()') if dtype == bool: condition.append(f' | {column_name} == \'\'') elif dtype == str: condition.append(f' | {column_name}.str.len() == 0') condition.append(f')') else: condition.append(f'({column_name}.notna()') if dtype == bool: condition.append(f' & {column_name} != \'\'') elif dtype == str: condition.append(f' & {column_name}.str.len() >= 1') condition.append(f')') return ''.join(condition) def __query_mutate_contains_op(match): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' value = value.strip('\'').strip('\"') if operator == Operator.CONTAINS: condition = f'({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' else: condition = f'~({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' return condition def __query_mutate_default_case(match, column_set): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' if value in column_set: # if comparison is with another column, prefix value with column identifier value = f'{TRANSFORMED_COLUMN_PREFIX}{value}' return f'{column_name} {operator} {value}' def __get_column_type(df, cache, column_name): dtype = cache.get(column_name, None) if dtype is None: dropped_na = df[column_name].dropna() dropped_na = dropped_na[~dropped_na.isin([''])] dtype = type(dropped_na.iloc[0]) if len(dropped_na.index) >= 1 else object cache[column_name] = dtype return dtype def query_with_action_code(df, action_code, kwargs): transformed_types, original_types = {}, {} original_df, original_merged = kwargs.get('original_df', None), False reconstructed_code = [] queried_df = df.copy().add_prefix(TRANSFORMED_COLUMN_PREFIX) column_set = set(df.columns) prev_end = 0 for match in ACTION_CODE_CONDITION_PATTERN.finditer(action_code): column_name, operator, value = match.groups() reconstructed_code.append(action_code[prev_end: match.start()]) prev_end = match.end() if operator == Operator.CONTAINS or 
operator == Operator.NOT_CONTAINS: transformed_dtype = __get_column_type(df, transformed_types, column_name) if transformed_dtype != str: raise TypeError( f'\'{operator}\' can only be used on string columns, {transformed_dtype}' ) reconstructed_code.append(__query_mutate_contains_op(match)) elif (operator == Operator.EQUALS or operator == Operator.NOT_EQUALS) and value == 'null': if original_df is None: raise Exception( 'Null value queries require original dataframe as keyword argument' ) elif not original_merged: queried_df = queried_df.join(original_df.add_prefix(ORIGINAL_COLUMN_PREFIX)) original_merged = True original_dtype = __get_column_type(original_df, original_types, column_name) reconstructed_code.append(__query_mutate_null_type(match, original_dtype)) else: reconstructed_code.append(__query_mutate_default_case(match, column_set)) reconstructed_code.append(action_code[prev_end:]) action_code = ''.join(reconstructed_code) queried_df = queried_df.query(action_code).rename( lambda x: x[len(TRANSFORMED_COLUMN_PREFIX):], axis='columns' ) return queried_df[df.columns] src/data_cleaner/transformer_actions/__init__.py METASEP src/data_cleaner/statistics/calculator.py METASEP from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.column_type_detector import ( DATETIME, NUMBER, NUMBER_TYPES, NUMBER_WITH_DECIMALS, ) import math import numpy as np import pandas as pd import traceback VALUE_COUNT_LIMIT = 255 def increment(metric, tags): pass class timer(object): """ with timer('metric.metric', tags={ 'key': 'value' }): function() """ def __init__(self, metric, tags={}): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class StatisticsCalculator(): def __init__( self, # s3_client, # object_key_prefix, # feature_set_version, column_types, **kwargs, ): self.column_types = column_types @property def data_tags(self): return dict() def process(self, df): return self.calculate_statistics_overview(df) def calculate_statistics_overview(self, df): increment( 'lambda.transformer_actions.calculate_statistics_overview.start', self.data_tags, ) with timer( 'lambda.transformer_actions.calculate_statistics_overview.time', self.data_tags): data = dict(count=len(df.index)) arr_args_1 = [df[col] for col in df.columns], arr_args_2 = [col for col in df.columns], dicts = run_parallel(self.statistics_overview, arr_args_1, arr_args_2) for d in dicts: data.update(d) # object_key = s3_paths.path_statistics_overview(self.object_key_prefix) # s3_data.upload_json_sorted(self.s3_client, object_key, data) increment( 'lambda.transformer_actions.calculate_statistics_overview.success', self.data_tags, ) return data def statistics_overview(self, series, col): try: return self.__statistics_overview(series, col) except Exception as err: increment( 'lambda.transformer_actions.calculate_statistics_overview.column.failed', merge_dict(self.data_tags, { 'col': col, 'error': err.__class__.__name__, }), ) traceback.print_exc() return {} def __statistics_overview(self, series, col): # The following regex based replace has high overheads # series = series.replace(r'^\s*$', np.nan, regex=True) series_cleaned = series.map(lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan) df_value_counts = series_cleaned.value_counts(dropna=False) df = df_value_counts.reset_index() df.columns = [col, 'count'] df_top_value_counts = df if df.shape[0] > VALUE_COUNT_LIMIT: df_top_value_counts = df.head(VALUE_COUNT_LIMIT) # TODO: 
remove duplicate data for distinct values # object_key_distinct_values = s3_paths.path_distinct_values_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_distinct_values, columns=[col]) # object_key_statistics = s3_paths.path_statistics_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_statistics) # features = self.feature_set_version['features'] # feature = find(lambda x: x['uuid'] == col, features) # if feature and feature.get('transformed'): # return {} column_type = self.column_types.get(col) series_non_null = series_cleaned.dropna() if column_type == NUMBER: series_non_null = series_non_null.astype(float).astype(int) elif column_type == NUMBER_WITH_DECIMALS: series_non_null = series_non_null.astype(float) count_unique = len(df_value_counts.index) data = { f'{col}/count': series_non_null.size, f'{col}/count_distinct': count_unique - 1 if np.nan in df_value_counts else count_unique, f'{col}/null_value_rate': 0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, } if len(series_non_null) == 0: return data dates = None if column_type in NUMBER_TYPES: data[f'{col}/average'] = series_non_null.sum() / len(series_non_null) data[f'{col}/max'] = series_non_null.max() data[f'{col}/median'] = series_non_null.quantile(0.5) data[f'{col}/min'] = series_non_null.min() data[f'{col}/sum'] = series_non_null.sum() elif column_type == DATETIME: dates = pd.to_datetime(series_non_null, utc=True, errors='coerce').dropna() data[f'{col}/max'] = dates.max().isoformat() data[f'{col}/median'] = dates.sort_values().iloc[math.floor(len(dates) / 2)].isoformat() data[f'{col}/min'] = dates.min().isoformat() if column_type not in NUMBER_TYPES: if dates is not None: value_counts = dates.value_counts() else: value_counts = series_non_null.value_counts() mode = value_counts.index[0] if column_type == DATETIME: mode = mode.isoformat() data[f'{col}/mode'] = mode return data src/data_cleaner/statistics/__init__.py METASEP src/data_cleaner/shared/utils.py METASEP from data_cleaner.column_type_detector import ( NUMBER, NUMBER_WITH_DECIMALS, ) import numpy as np def clean_series(series, column_type, dropna=True): series_cleaned = series.map( lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan, ) if dropna: series_cleaned = series_cleaned.dropna() if column_type == NUMBER: try: series_cleaned = series_cleaned.astype(float).astype(int) except ValueError: series_cleaned = series_cleaned.astype(float) elif column_type == NUMBER_WITH_DECIMALS: series_cleaned = series_cleaned.astype(float) return series_cleaned src/data_cleaner/shared/multi.py METASEP from concurrent.futures import ThreadPoolExecutor from threading import Thread MAX_WORKERS = 16 def start_thread(target, **kwargs): thread = Thread( target=target, kwargs=kwargs, ) thread.start() return thread def parallelize(func, arr): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, arr) def parallelize_multiple_args(func, arr_args): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *zip(*arr_args)) def run_parallel_threads(list_of_funcs_and_args_or_kwargs): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: for func, args in list_of_funcs_and_args_or_kwargs: pool.submit(func, *args) def run_parallel(func, arr_args_1, arr_args_2): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *arr_args_1, 
*arr_args_2) src/data_cleaner/shared/hash.py METASEP from functools import reduce import math import re def dig(obj_arg, arr_or_string): if type(arr_or_string) is str: arr_or_string = arr_or_string.split('.') arr = list(map(str.strip, arr_or_string)) def _build(obj, key): tup = re.split(r'\[(\d+)\]$', key) if len(tup) >= 2: key, index = filter(lambda x: x, tup) if key and index: return obj[key][int(index)] elif index: return obj[int(index)] elif obj: return obj.get(key) else: return obj return reduce(_build, arr, obj_arg) def flatten(input_data): final_data = {} for k1, v1 in input_data.items(): if type(v1) is dict: for k2, v2 in v1.items(): if type(v2) is dict: for k3, v3 in v2.items(): final_data[f'{k1}_{k2}_{k3}'] = v3 else: final_data[f'{k1}_{k2}'] = v2 else: final_data[k1] = v1 return final_data def ignore_keys(d, keys): d_keys = d.keys() d2 = d.copy() for key in keys: if key in d_keys: d2.pop(key) return d2 def ignore_keys_with_blank_values(d): d2 = d.copy() for key, value in d.items(): if not value: d2.pop(key) return d2 def extract(d, keys): def _build(obj, key): val = d.get(key, None) if val is not None: obj[key] = val return obj return reduce(_build, keys, {}) def extract_arrays(input_data): arr = [] for k, v in input_data.items(): if type(v) is list: arr.append(v) return arr def group_by(func, arr): def _build(obj, item): val = func(item) if not obj.get(val): obj[val] = [] obj[val].append(item) return obj return reduce(_build, arr, {}) def index_by(func, arr): obj = {} for item in arr: key = func(item) obj[key] = item return obj def merge_dict(a, b): c = a.copy() c.update(b) return c def replace_dict_nan_value(d): def _replace_nan_value(v): if type(v) == float and math.isnan(v): return None return v return {k: _replace_nan_value(v) for k, v in d.items()} src/data_cleaner/shared/array.py METASEP import random def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def difference(li1, li2): li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2] return li_dif def flatten(arr): return [item for sublist in arr for item in sublist] def find(condition, arr, map=None): try: return next(map(x) if map else x for x in arr if condition(x)) except StopIteration: return None def sample(arr): return arr[random.randrange(0, len(arr))] def subtract(arr1, arr2): return [i for i in arr1 if i not in arr2] src/data_cleaner/shared/__init__.py METASEP src/data_cleaner/pipelines/base.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from data_cleaner.transformer_actions.base import BaseAction DEFAULT_RULES = [ RemoveColumnsWithHighEmptyRate, RemoveColumnsWithSingleValue, ] class BasePipeline(): def __init__(self, actions=[]): self.actions = actions self.rules = DEFAULT_RULES def create_actions(self, df, column_types, statistics): all_suggestions = [] for rule in self.rules: suggestions = rule(df, column_types, statistics).evaluate() if suggestions: all_suggestions += suggestions self.actions = all_suggestions return all_suggestions def transform(self, df): if len(self.actions) == 0: print('Pipeline is empty.') return df df_transformed = df for action in self.actions: df_transformed = BaseAction(action['action_payload']).execute(df_transformed) return df_transformed src/data_cleaner/pipelines/__init__.py METASEP src/data_cleaner/cleaning_rules/unit_conversion.py METASEP 
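The BasePipeline module above is the glue between the cleaning rules and BaseAction: create_actions collects suggestions from the default rules, and transform replays each suggestion's action_payload over the dataframe. The cleaning-rule modules that follow at this point in the repo (unit_conversion, type_conversion, remove_outliers and several others) are still empty placeholders. Below is a rough usage sketch only, not repo code: it assumes the repo's src/ directory is on PYTHONPATH so the data_cleaner imports resolve, and the sample dataframe, column types and statistics are invented for illustration.

import pandas as pd

from data_cleaner.pipelines.base import BasePipeline

# Invented sample data: one useful column, one empty column, one single-valued column.
df = pd.DataFrame({
    'id': [1, 2, 3, 4, 5],
    'deleted_at': [None, None, None, None, None],
    'is_active': [True, True, True, True, True],
})
column_types = {
    'id': 'number',
    'deleted_at': 'datetime',
    'is_active': 'true_or_false',
}
# Hand-written statistics in the same shape StatisticsCalculator produces.
statistics = {
    'id/count': 5, 'id/count_distinct': 5, 'id/null_value_rate': 0,
    'deleted_at/count': 0, 'deleted_at/count_distinct': 0, 'deleted_at/null_value_rate': 1.0,
    'is_active/count': 5, 'is_active/count_distinct': 1, 'is_active/null_value_rate': 0,
}

pipeline = BasePipeline()
suggestions = pipeline.create_actions(df, column_types, statistics)
df_cleaned = pipeline.transform(df)

print([s['title'] for s in suggestions])
print(df_cleaned.columns.tolist())  # expected to keep only ['id'] with these statistics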
src/data_cleaner/cleaning_rules/type_conversion.py METASEP src/data_cleaner/cleaning_rules/remove_outliers.py METASEP src/data_cleaner/cleaning_rules/remove_duplicate_rows.py METASEP src/data_cleaner/cleaning_rules/remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithSingleValue(BaseRule): # Check statistic [feature_uuid]/count_distinct def evaluate(self): columns_with_single_value = [] for c in self.df_columns: if f'{c}/count_distinct' not in self.statistics: continue feature_count_distinct = self.statistics[f'{c}/count_distinct'] if feature_count_distinct == 1: columns_with_single_value.append(c) suggestions = [] suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with single value', f'The following columns have single value in all rows: {columns_with_single_value}.'\ ' Suggest to remove them.', ActionType.REMOVE, action_arguments=columns_with_single_value, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithHighEmptyRate(BaseRule): MISSING_RATE_THRESHOLD = 0.8 def evaluate(self): columns_with_missing_values = [] columns_with_no_values = [] for c in self.df_columns: if self.statistics.get(f'{c}/count') == 0: columns_with_no_values.append(c) elif f'{c}/null_value_rate' in self.statistics: null_value_rate = self.statistics[f'{c}/null_value_rate'] if null_value_rate >= self.MISSING_RATE_THRESHOLD: columns_with_missing_values.append(c) suggestions = [] if len(columns_with_no_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with no values', f'The following columns have no values: {columns_with_no_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_no_values, axis=Axis.COLUMN, )) if len(columns_with_missing_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with high empty rate', f'The following columns have high empty rate: {columns_with_missing_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_missing_values, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_few_unique_values.py METASEP src/data_cleaner/cleaning_rules/remove_collinear_columns.py METASEP src/data_cleaner/cleaning_rules/reformat_values.py METASEP src/data_cleaner/cleaning_rules/impute_values.py METASEP src/data_cleaner/cleaning_rules/fix_encoding.py METASEP src/data_cleaner/cleaning_rules/base.py METASEP class BaseRule: def __init__(self, df, column_types, statistics): self.df = df self.df_columns = df.columns.tolist() self.column_types = column_types self.statistics = statistics def evaluate(self): """Evaluate data cleaning rule and generate suggested actions Returns ------- A list of suggested actions """ return [] def _build_transformer_action_suggestion( self, title, message, action_type, action_arguments=[], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ): return dict( title=title, message=message, action_payload=dict( action_type=action_type, action_arguments=action_arguments, action_code=action_code, action_options=action_options, 
action_variables=action_variables, axis=axis, outputs=outputs, ), ) src/data_cleaner/cleaning_rules/__init__.py METASEP src/data_cleaner/analysis/constants.py METASEP CHART_TYPE_BAR_HORIZONTAL = 'bar_horizontal' CHART_TYPE_LINE_CHART = 'line_chart' CHART_TYPE_HISTOGRAM = 'histogram' LABEL_TYPE_RANGE = 'range' DATA_KEY_CHARTS = 'charts' DATA_KEY_CORRELATION = 'correlations' DATA_KEY_OVERVIEW = 'overview' DATA_KEY_TIME_SERIES = 'time_series' DATA_KEYS = [ DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_OVERVIEW, DATA_KEY_TIME_SERIES, ] src/data_cleaner/analysis/charts.py METASEP from data_cleaner.analysis.constants import ( CHART_TYPE_BAR_HORIZONTAL, CHART_TYPE_LINE_CHART, CHART_TYPE_HISTOGRAM, DATA_KEY_TIME_SERIES, LABEL_TYPE_RANGE, ) from data_cleaner.shared.utils import clean_series from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) import dateutil.parser import math import numpy as np import pandas as pd DD_KEY = 'lambda.analysis_charts' BUCKETS = 40 TIME_SERIES_BUCKETS = 40 def increment(metric, tags={}): pass def build_buckets(min_value, max_value, max_buckets, column_type): diff = max_value - min_value total_interval = 1 + diff bucket_interval = total_interval / max_buckets number_of_buckets = max_buckets is_integer = False parts = str(diff).split('.') if len(parts) == 1: is_integer = True else: is_integer = int(parts[1]) == 0 if NUMBER == column_type and total_interval <= max_buckets and is_integer: number_of_buckets = int(total_interval) bucket_interval = 1 elif bucket_interval > 1: bucket_interval = math.ceil(bucket_interval) else: bucket_interval = round(bucket_interval * 100, 1) / 100 buckets = [] for i in range(number_of_buckets): min_v = min_value + (i * bucket_interval) max_v = min_value + ((i + 1) * bucket_interval) buckets.append(dict( max_value=max_v, min_value=min_v, values=[], )) return buckets, bucket_interval def build_histogram_data(col1, series, column_type): increment(f'{DD_KEY}.build_histogram_data.start', dict(feature_uuid=col1)) max_value = series.max() min_value = series.min() buckets, bucket_interval = build_buckets(min_value, max_value, BUCKETS, column_type) if bucket_interval == 0: return for value in series.values: index = math.floor((value - min_value) / bucket_interval) if value == max_value: index = len(buckets) - 1 buckets[index]['values'].append(value) x = [] y = [] for bucket in buckets: x.append(dict( max=bucket['max_value'], min=bucket['min_value'], )) y.append(dict(value=len(bucket['values']))) increment(f'{DD_KEY}.build_histogram_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_HISTOGRAM, x=x, x_metadata=dict( label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_correlation_data(df, col1, features): increment(f'{DD_KEY}.build_correlation_data.start', dict(feature_uuid=col1)) x = [] y = [] df_copy = df.copy() for feature in features: col2 = feature['uuid'] column_type = feature['column_type'] series = df_copy[col2] df_copy[col2] = clean_series(series, column_type, dropna=False) corr = df_copy.corr() for feature in features: col2 = feature['uuid'] if col1 != col2: value = corr[col1].get(col2, None) if value is not None: x.append(dict(label=col2)) y.append(dict(value=value)) increment(f'{DD_KEY}.build_correlation_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_BAR_HORIZONTAL, x=x, y=y, ) def build_time_series_data(df, feature, datetime_column, column_type): col1 = feature['uuid'] column_type = feature['column_type'] tags = 
dict( column_type=column_type, datetime_column=datetime_column, feature_uuid=col1, ) increment(f'{DD_KEY}.build_time_series_data.start', tags) # print(feature, datetime_column) datetimes = clean_series(df[datetime_column], DATETIME) if datetimes.size <= 1: return min_value_datetime = dateutil.parser.parse(datetimes.min()).timestamp() max_value_datetime = dateutil.parser.parse(datetimes.max()).timestamp() buckets, bucket_interval = build_buckets( min_value_datetime, max_value_datetime, TIME_SERIES_BUCKETS, column_type, ) x = [] y = [] df_copy = df.copy() df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] series = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )][col1] x.append(dict( max=max_value, min=min_value, )) series_cleaned = clean_series(series, column_type, dropna=False) df_value_counts = series_cleaned.value_counts(dropna=False) series_non_null = series_cleaned.dropna() count_unique = len(df_value_counts.index) y_data = dict( count=series_non_null.size, count_distinct=count_unique - 1 if np.nan in df_value_counts else count_unique, null_value_rate=0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, ) if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: y_data.update(dict( average=series_non_null.sum() / len(series_non_null), max=series_non_null.max(), median=series_non_null.quantile(0.5), min=series_non_null.min(), sum=series_non_null.sum(), )) elif column_type in [CATEGORY, CATEGORY_HIGH_CARDINALITY, TRUE_OR_FALSE]: value_counts = series_non_null.value_counts() if len(value_counts.index): value_counts_top = value_counts.sort_values(ascending=False).iloc[:12] mode = value_counts_top.index[0] y_data.update(dict( mode=mode, value_counts=value_counts_top.to_dict(), )) y.append(y_data) increment(f'{DD_KEY}.build_time_series_data.succeeded', tags) return dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_overview_data(df, datetime_features): increment(f'{DD_KEY}.build_overview_data.start') time_series = [] df_copy = df.copy() for feature in datetime_features: column_type = feature['column_type'] datetime_column = feature['uuid'] tags = dict(datetime_column=datetime_column) increment(f'{DD_KEY}.build_overview_time_series.start', tags) if clean_series(df_copy[datetime_column], DATETIME).size <= 1: continue df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) min_value1 = df_copy[datetime_column].min() max_value1 = df_copy[datetime_column].max() buckets, bucket_interval = build_buckets(min_value1, max_value1, TIME_SERIES_BUCKETS, column_type) x = [] y = [] for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] df_filtered = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )] x.append(dict( max=max_value, min=min_value, )) y.append(dict( count=len(df_filtered.index), )) time_series.append(dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, )) increment(f'{DD_KEY}.build_overview_time_series.succeeded', tags) increment(f'{DD_KEY}.build_overview_data.succeeded') return { DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/calculator.py METASEP from data_cleaner.analysis import 
charts from data_cleaner.analysis.constants import ( DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_TIME_SERIES, ) from data_cleaner.shared.utils import clean_series from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.transformer_actions import constants from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) DD_KEY = 'lambda.analysis_calculator' def increment(metric, tags={}): pass class AnalysisCalculator(): def __init__( self, df, column_types, **kwargs, ): self.df = df self.column_types = column_types self.features = [{'uuid': col, 'column_type': column_types.get(col)} for col in df.columns] def process(self, df): increment(f'{DD_KEY}.process.start', self.tags) df_columns = df.columns features_to_use = self.features datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] arr_args_1 = [df for _ in features_to_use], arr_args_2 = features_to_use, data_for_columns = [d for d in run_parallel(self.calculate_column, arr_args_1, arr_args_2)] overview = charts.build_overview_data( df, datetime_features_to_use, ) correlation_overview = [] for d in data_for_columns: corr = d.get(DATA_KEY_CORRELATION) if corr: correlation_overview.append({ 'feature': d['feature'], DATA_KEY_CORRELATION: corr, }) increment(f'{DD_KEY}.process.succeeded', self.tags) return data_for_columns, merge_dict(overview, { DATA_KEY_CORRELATION: correlation_overview, }) @property def features_by_uuid(self): data = {} for feature in self.features: data[feature['uuid']] = feature return data @property def datetime_features(self): return [f for f in self.features if f['column_type'] == DATETIME] @property def tags(self): return dict() def calculate_column(self, df, feature): df_columns = df.columns features_to_use = [f for f in self.features if f['uuid'] in df_columns] datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] col = feature['uuid'] column_type = feature['column_type'] tags = merge_dict(self.tags, dict(column_type=column_type, feature_uuid=col)) increment(f'{DD_KEY}.calculate_column.start', tags) series = df[col] series_cleaned = clean_series(series, column_type) chart_data = [] correlation = [] time_series = [] if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: histogram_data = charts.build_histogram_data(col, series_cleaned, column_type) if histogram_data: chart_data.append(histogram_data) correlation.append(charts.build_correlation_data(df, col, features_to_use)) if column_type in [ CATEGORY, CATEGORY_HIGH_CARDINALITY, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ]: time_series = [] for f in datetime_features_to_use: time_series_chart = charts.build_time_series_data(df, feature, f['uuid'], column_type) if time_series_chart: time_series.append(time_series_chart) increment(f'{DD_KEY}.calculate_column.succeeded', tags) return { 'feature': feature, DATA_KEY_CHARTS: chart_data, DATA_KEY_CORRELATION: correlation, DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/__init__.py METASEP src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_single_value.py METASEP from data_cleaner.tests.base_test import TestCase from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue import pandas as pd import numpy as np class RemoveColumnWithSingleValueTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01', True], [2, '2022-01-02', 
True], [3, np.NaN, True], [4, np.NaN, True], [5, np.NaN, True], ], columns=['id', 'deleted_at', 'is_active']) column_types = { 'id': 'number', 'deleted_at': 'datetime', 'is_active': 'true_or_false', } statistics = { 'id/count_distinct': 5, 'deleted_at/count_distinct': 2, 'is_active/count_distinct': 1, } result = RemoveColumnsWithSingleValue(df, column_types, statistics).evaluate() self.assertEqual(result, [ dict( title='Remove columns with single value', message=f'The following columns have single value in all rows: [\'is_active\'].'\ ' Suggest to remove them.', action_payload=dict( action_type='remove', action_arguments=['is_active'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_high_empty_rate.py METASEP from data_cleaner.tests.base_test import TestCase from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate import numpy as np import pandas as pd class RemoveColumnWithHighMissingRateTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01'], [2, np.NaN], [3, np.NaN], [4, np.NaN], [5, np.NaN], ], columns=['id', 'deleted_at']) column_types = { 'id': 'number', 'deleted_at': 'datetime', } statistics = { 'id/null_value_rate': 0, 'deleted_at/null_value_rate': 0.8, } result = RemoveColumnsWithHighEmptyRate( df, column_types, statistics, ).evaluate() self.assertEqual(result, [ dict( title='Remove columns with high empty rate', message='The following columns have high empty rate: [\'deleted_at\'].'\ ' Removing them may increase your data quality.', action_payload=dict( action_type='remove', action_arguments=['deleted_at'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) src/data_cleaner/tests/cleaning_rules/__init__.py METASEP src/data_cleaner/data_cleaner.py METASEP from data_cleaner import column_type_detector from data_cleaner.analysis.calculator import AnalysisCalculator from data_cleaner.pipelines.base import BasePipeline from data_cleaner.shared.hash import merge_dict from data_cleaner.statistics.calculator import StatisticsCalculator def clean(df): cleaner = DataCleaner() return cleaner.clean(df) class DataCleaner(): def analyze(self, df): """ Analyze a dataframe 1. Detect column types 2. Calculate statisitics 3. 
src/data_cleaner/column_type_detector.py METASEP
from data_cleaner.shared.array import subtract

import numpy as np
import re
import warnings

DATETIME_MATCHES_THRESHOLD = 0.5
MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES = 40

CATEGORY = 'category'
CATEGORY_HIGH_CARDINALITY = 'category_high_cardinality'
DATETIME = 'datetime'
EMAIL = 'email'
NUMBER = 'number'
NUMBER_WITH_DECIMALS = 'number_with_decimals'
PHONE_NUMBER = 'phone_number'
TEXT = 'text'
TRUE_OR_FALSE = 'true_or_false'
ZIP_CODE = 'zip_code'

NUMBER_TYPES = [NUMBER, NUMBER_WITH_DECIMALS]
STRING_TYPES = [EMAIL, PHONE_NUMBER, TEXT, ZIP_CODE]

COLUMN_TYPES = [
    CATEGORY,
    CATEGORY_HIGH_CARDINALITY,
    DATETIME,
    EMAIL,
    NUMBER,
    NUMBER_WITH_DECIMALS,
    PHONE_NUMBER,
    TEXT,
    TRUE_OR_FALSE,
    ZIP_CODE,
]

REGEX_DATETIME_PATTERN = r'^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}$|^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}[Tt ]{1}[\d]{1,2}:[\d]{1,2}[:]{0,1}[\d]{1,2}[\.]{0,1}[\d]*|^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$|^\d{1,4}[-\/]{1}\d{1,2}[-\/]{1}\d{1,4}$|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})[\s,]+(\d{2,4})'
REGEX_EMAIL_PATTERN = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
REGEX_EMAIL = re.compile(REGEX_EMAIL_PATTERN)
REGEX_INTEGER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+$'
REGEX_INTEGER = re.compile(REGEX_INTEGER_PATTERN)
REGEX_NUMBER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+\.[0-9]*%{0,1}$|^[\-]{0,1}[\$]{0,1}[0-9,]+%{0,1}$'
REGEX_NUMBER = re.compile(REGEX_NUMBER_PATTERN)
REGEX_PHONE_NUMBER_PATTERN = r'^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. ]*(\d{4})(?: *x(\d+))?\s*$'
REGEX_PHONE_NUMBER = re.compile(REGEX_PHONE_NUMBER_PATTERN)
REGEX_ZIP_CODE_PATTERN = r'^\d{3,5}(?:[-\s]\d{4})?$'
REGEX_ZIP_CODE = re.compile(REGEX_ZIP_CODE_PATTERN)


def infer_column_types(df, **kwargs):
    binary_feature_names = []
    category_feature_names = []
    datetime_feature_names = []
    email_features = []
    float_feature_names = []
    integer_feature_names = []
    non_number_feature_names = []
    phone_number_feature_names = []
    text_feature_names = []
    zip_code_feature_names = []

    for idx, col_type in enumerate(df.dtypes):
        col_name = df.columns[idx]
        if 'datetime64' in str(col_type):
            datetime_feature_names.append(col_name)
        elif col_type == 'object':
            df_sub = df[col_name].copy()
            df_sub = df_sub.replace('^\s+$', np.nan, regex=True)
            df_sub = df_sub.dropna()
            df_sub = df_sub.apply(lambda x: x.strip() if type(x) is str else x)
            if df_sub.empty:
                non_number_feature_names.append(col_name)
            else:
                first_item = df_sub.iloc[0]
                if type(first_item) is list:
                    text_feature_names.append(col_name)
                elif type(first_item) is bool or type(first_item) is np.bool_:
                    if len(df[col_name].unique()) <= 2:
                        binary_feature_names.append(col_name)
                    else:
                        category_feature_names.append(col_name)
                elif len(df[col_name].unique()) <= 2:
                    binary_feature_names.append(col_name)
                else:
                    df_sub = df_sub.astype(str)
                    incorrect_emails = len(
                        df_sub[df_sub.str.contains(REGEX_EMAIL) == False].index,
                    )
                    warnings.filterwarnings('ignore', 'This pattern has match groups')
                    incorrect_phone_numbers = len(
                        df_sub[df_sub.str.contains(REGEX_PHONE_NUMBER) == False].index,
                    )
                    incorrect_zip_codes = len(
                        df_sub[df_sub.str.contains(REGEX_ZIP_CODE) == False].index,
                    )
                    if all(df_sub.str.contains(REGEX_INTEGER)):
                        integer_feature_names.append(col_name)
                    elif all(df_sub.str.contains(REGEX_NUMBER)):
                        float_feature_names.append(col_name)
                    elif incorrect_emails / len(df_sub.index) <= 0.99:
                        email_features.append(col_name)
                    elif incorrect_phone_numbers / len(df_sub.index) <= 0.99:
                        phone_number_feature_names.append(col_name)
                    elif incorrect_zip_codes / len(df_sub.index) <= 0.99:
                        zip_code_feature_names.append(col_name)
                    else:
                        non_number_feature_names.append(col_name)
        elif col_type == 'bool':
            binary_feature_names.append(col_name)
        elif np.issubdtype(col_type, np.floating):
            float_feature_names.append(col_name)
        elif np.issubdtype(col_type, np.integer):
            df_sub = df[col_name].copy()
            df_sub = df_sub.dropna()
            if df_sub.min() >= 100 and df_sub.max() <= 99999 and 'zip' in col_name.lower():
                zip_code_feature_names.append(col_name)
            else:
                integer_feature_names.append(col_name)

    number_feature_names = float_feature_names + integer_feature_names
    binary_feature_names += \
        [col for col in number_feature_names if df[col].nunique(dropna=False) == 2]
    binary_feature_names += \
        [col for col in non_number_feature_names if df[col].nunique(dropna=False) == 2]
    float_feature_names = [col for col in float_feature_names if col not in binary_feature_names]
    integer_feature_names = \
        [col for col in integer_feature_names if col not in binary_feature_names]

    for col_name in subtract(non_number_feature_names, binary_feature_names):
        df_drop_na = df[col_name].dropna()
        if df_drop_na.empty:
            text_feature_names.append(col_name)
        else:
            matches = df_drop_na.astype(str).str.contains(REGEX_DATETIME_PATTERN)
            matches = matches.where(matches == True).dropna()
            if type(df_drop_na.iloc[0]) is list:
                text_feature_names.append(col_name)
            elif len(df_drop_na[matches.index]) / len(df_drop_na) >= DATETIME_MATCHES_THRESHOLD:
                datetime_feature_names.append(col_name)
            elif df_drop_na.nunique() / len(df_drop_na) >= 0.8:
                text_feature_names.append(col_name)
            else:
                word_count, _ = \
                    df[col_name].dropna().map(lambda x: (len(str(x).split(' ')), str(x))).max()
                if word_count > MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES:
                    text_feature_names.append(col_name)
                else:
                    category_feature_names.append(col_name)

    low_cardinality_category_feature_names = \
        [col for col in category_feature_names if df[col].nunique() <= kwargs.get(
            'category_cardinality_threshold',
            255,
        )]
    high_cardinality_category_feature_names = \
        [col for col in category_feature_names if col not in low_cardinality_category_feature_names]

    column_types = {}
    array_types_mapping = {
        CATEGORY: low_cardinality_category_feature_names,
        CATEGORY_HIGH_CARDINALITY: high_cardinality_category_feature_names,
        DATETIME: datetime_feature_names,
        EMAIL: email_features,
        NUMBER: integer_feature_names,
        NUMBER_WITH_DECIMALS: float_feature_names,
        PHONE_NUMBER: phone_number_feature_names,
        TEXT: text_feature_names,
        TRUE_OR_FALSE: binary_feature_names,
        ZIP_CODE: zip_code_feature_names,
    }
    for col_type, arr in array_types_mapping.items():
        for col in arr:
            column_types[col] = col_type

    return column_types
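# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of infer_column_types on a small hypothetical dataframe. The
# function returns a dict mapping each column name to one of the COLUMN_TYPES
# constants defined above; the category_cardinality_threshold kwarg
# (default 255) controls the CATEGORY vs. CATEGORY_HIGH_CARDINALITY split.
if __name__ == '__main__':
    import pandas as pd

    df_example = pd.DataFrame({
        'user_email': ['a@example.com', 'b@example.com', 'c@example.com'],
        'price': [10.5, 20.0, 30.25],
        'signed_up_at': ['2021-01-01', '2021-02-01', '2021-03-01'],
        'is_active': [True, False, True],
    })
    print(infer_column_types(df_example))
    print(infer_column_types(df_example, category_cardinality_threshold=10))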
src/data_cleaner/__init__.py METASEP
src/data_cleaner/tests/base_test.py METASEP
import unittest


class TestCase(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass
src/data_cleaner/tests/__init__.py METASEP
src/data_cleaner/tests/transformer_actions/test_column.py METASEP
[ { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n 
action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def 
test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n 
action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['group_id']\n )\n df_new = select(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n ),\n dict(\n group_id=2,\n ),\n ])\n\n def test_shift_down(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n prev_sold=1000,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n prev_sold=1050,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n prev_sold=1200,\n ),\n ])\n\n def test_shift_down_with_groupby(self):\n df = pd.DataFrame([\n [1, '2020-01-01', 1000],\n [1, '2020-01-02', 1050],\n [2, '2020-01-03', 1200],\n [1, '2020-01-04', 990],\n [2, '2020-01-05', 980],\n [2, '2020-01-06', 970],\n [2, '2020-01-07', 960],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n action_options=dict(\n groupby_columns=['group_id'],\n periods=2,\n ),\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n df_expected = pd.DataFrame([\n [1, '2020-01-01', 1000, None],\n [1, '2020-01-02', 1050, None],\n [2, '2020-01-03', 1200, None],\n [1, '2020-01-04', 990, 1000],\n [2, '2020-01-05', 980, None],\n [2, '2020-01-06', 970, 1200],\n [2, '2020-01-07', 960, 980],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n 'prev_sold',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_shift_up(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 
1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='next_sold'),\n ],\n )\n df_new = shift_up(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[:-1], [\n dict(\n date='2020-01-01',\n sold=1000,\n next_sold=1050,\n ),\n dict(\n date='2020-01-02',\n sold=1050,\n next_sold=1200,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n next_sold=990,\n ),\n ])\n\n def test_sum(self):\n from data_cleaner.transformer_actions.column import sum", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n 
[False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n 
uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # 
properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 
'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 
'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': 
\\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n 
arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = 
average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n 
[2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 
'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 
'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 
1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n 
boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # 
uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n 
created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', 
False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 
'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n 
['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median", "type": "infile" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = 
add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def 
test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 
'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def 
test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 
'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # 
column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = 
pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass 
ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = 
pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, 
action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def 
test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n 
action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, 
-97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, 
df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n 
uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", 
\\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 
10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n 
order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing 
import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n 
created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # 
action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n 
self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n 
])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n 
],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n 
# property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n 
action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], 
columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n 
self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": 
\\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = 
add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 
1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 
'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n 
[2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n 
])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # 
column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n 
boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['group_id']\n )\n df_new = select(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n ),\n dict(\n group_id=2,\n ),\n ])\n\n def test_shift_down(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n prev_sold=1000,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n prev_sold=1050,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n prev_sold=1200,\n ),\n ])\n\n def test_shift_down_with_groupby(self):\n df = pd.DataFrame([\n [1, '2020-01-01', 1000],\n [1, '2020-01-02', 1050],\n [2, '2020-01-03', 1200],\n [1, '2020-01-04', 990],\n [2, '2020-01-05', 980],\n [2, '2020-01-06', 970],\n [2, '2020-01-07', 960],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n action_options=dict(\n groupby_columns=['group_id'],\n periods=2,\n ),\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 
'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom 
pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n 
dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # 
action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n 
self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n    add_column,\n    count,\n    count_distinct,\n    diff,\n    # expand_column,\n    first,\n    last,\n    remove_column,\n    select,\n    shift_down,\n
shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n 
self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 
'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, 
action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n 
action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', 
'2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": 
true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n 
converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n 
[1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n 
['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], 
columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = 
df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['group_id']\n )\n df_new = select(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n ),\n dict(\n group_id=2,\n ),\n ])\n\n def test_shift_down(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n 
['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n prev_sold=1000,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n prev_sold=1050,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n prev_sold=1200,\n ),\n ])\n\n def test_shift_down_with_groupby(self):\n df = pd.DataFrame([\n [1, '2020-01-01', 1000],\n [1, '2020-01-02', 1050],\n [2, '2020-01-03', 1200],\n [1, '2020-01-04', 990],\n [2, '2020-01-05', 980],\n [2, '2020-01-06', 970],\n [2, '2020-01-07', 960],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n action_options=dict(\n groupby_columns=['group_id'],\n periods=2,\n ),\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n df_expected = pd.DataFrame([\n [1, '2020-01-01', 1000, None],\n [1, '2020-01-02', 1050, None],\n [2, '2020-01-03', 1200, None],\n [1, '2020-01-04', 990, 1000],\n [2, '2020-01-05', 980, None],\n [2, '2020-01-06', 970, 1200],\n [2, '2020-01-07', 960, 980],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n 'prev_sold',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_shift_up(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='next_sold'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n 
),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def 
test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = 
dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 
'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n 
outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n 
outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n 
dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = 
df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n 
integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n 
action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': 
\\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n 
)\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n 
action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n 
},\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # 
properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at 
== null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. 
', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, 
'2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video 
Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 
'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def 
test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['group_id']\n )\n df_new = select(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n ),\n dict(\n group_id=2,\n ),\n ])\n\n def test_shift_down(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n prev_sold=1000,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n prev_sold=1050,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n prev_sold=1200,\n ),\n ])\n\n def test_shift_down_with_groupby(self):\n df = pd.DataFrame([\n [1, '2020-01-01', 1000],\n [1, '2020-01-02', 1050],\n [2, '2020-01-03', 1200],\n [1, '2020-01-04', 990],\n [2, '2020-01-05', 980],\n [2, '2020-01-06', 970],\n [2, '2020-01-07', 960],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n action_options=dict(\n groupby_columns=['group_id'],\n periods=2,\n ),\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n df_expected = pd.DataFrame([\n [1, '2020-01-01', 1000, None],\n [1, '2020-01-02', 1050, None],\n [2, '2020-01-03', 1200, None],\n [1, '2020-01-04', 990, 1000],\n [2, '2020-01-05', 980, None],\n [2, '2020-01-06', 970, 1200],\n [2, '2020-01-07', 960, 980],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n 'prev_sold',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_shift_up(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 
1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='next_sold'),\n ],\n )\n df_new = shift_up(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[:-1], [\n dict(\n date='2020-01-01',\n sold=1000,\n next_sold=1050,\n ),\n dict(\n date='2020-01-02',\n sold=1050,\n next_sold=1200,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n next_sold=990,\n ),\n ])\n\n def test_sum(self):\n from data_cleaner.transformer_actions.column import sum\n action = self.__groupby_agg_action('total_amount')", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def 
test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n 
action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # 
self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n 
action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n 
action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['group_id']\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 
3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', 
'2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 
'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # 
uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n 
action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n 
action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n 
dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n 
action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # 
dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n 
dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), 
action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', 
'2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n 
# 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n 
action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n 
assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 
'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, 
action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n 
action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n 
column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n 
distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # 
property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], 
columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n 
action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 
'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n 
['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n 
assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n 
outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # 
properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n 
action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n 
),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n 
sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n 
group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n 
add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n 
df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 
'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 
'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n 
],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n 
)\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, 
action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 = pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = 
df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 
'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 
'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # 
self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 
'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 
])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # 
),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n 
def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n 
dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n 
action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # 
dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n 
dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. '],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), 
action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', 
'2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n 
# 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],\n ['2020-01-04', 0, 0, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected2 = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, np.nan, 900],\n ['2020-01-04', 0, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected3 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1300, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1300, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected4 = pd.DataFrame([\n ['2020-01-01', 1000, 1250, 800],\n ['2020-01-02', 1200, 1200, 700],\n ['2020-01-03', 1200, 1250, 900],\n ['2020-01-04', 1200, 1250, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n df_expected5 
= pd.DataFrame([\n ['2020-01-01', 1000, 800, 800],\n ['2020-01-02', 700, 1200, 700],\n ['2020-01-03', 1200, 900, 900],\n ['2020-01-04', 700, 700, 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n \n df_new1['sold'] = df_new1['sold'].astype(int)\n df_new1['curr_profit'] = df_new1['curr_profit'].astype(int)\n df_new2['sold'] = df_new2['sold'].astype(int)\n df_new3['sold'] = df_new3['sold'].astype(int)\n df_new3['curr_profit'] = df_new3['curr_profit'].astype(int)\n df_new4['sold'] = df_new4['sold'].astype(int)\n df_new4['curr_profit'] = df_new4['curr_profit'].astype(int)\n df_new5['sold'] = df_new5['sold'].astype(int)\n df_new5['curr_profit'] = df_new5['curr_profit'].astype(int)\n\n assert_frame_equal(df_new1, df_expected1)\n assert_frame_equal(df_new2, df_expected2)\n assert_frame_equal(df_new3, df_expected3)\n assert_frame_equal(df_new4, df_expected4)\n assert_frame_equal(df_new5, df_expected5)\n \n with self.assertRaises(Exception):\n _ = impute(df.copy(), action_invalid)\n\n def test_last_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='last_order'),\n ],\n )\n df_new = last(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1050,\n last_order=1150,\n ),\n dict(\n group_id=1,\n order_id=1100,\n last_order=1100,\n ),\n dict(\n group_id=2,\n order_id=1150,\n last_order=1150,\n ),\n ])\n\n def test_max(self):\n from data_cleaner.transformer_actions.column import max\n action = self.__groupby_agg_action('max_amount')\n df_new = max(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1100],\n [2, 1050, 1150],\n [1, 1100, 1100],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n action2 = dict(\n action_arguments=['amount'],\n action_options=dict(),\n outputs=[\n dict(uuid='max_amount'),\n ],\n )\n df_new2 = max(TEST_DATAFRAME.copy(), action2)\n df_expected2 = pd.DataFrame([\n [1, 1000, 1150],\n [2, 1050, 1150],\n [1, 1100, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'max_amount',\n ])\n assert_frame_equal(df_new2, df_expected2)\n\n def test_median(self):\n from data_cleaner.transformer_actions.column import median\n action = self.__groupby_agg_action('median_amount')\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1550],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n ])\n df_new = median(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1150],\n [1, 1100, 1050],\n [2, 1550, 1150],\n [2, 1150, 1150],\n ], columns=[\n 'group_id',\n 'amount',\n 'median_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_min(self):\n from data_cleaner.transformer_actions.column import min\n action = self.__groupby_agg_action('min_amount')\n df_new = min(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1000],\n [2, 1050, 1050],\n [1, 1100, 1000],\n [2, 1150, 1050],\n ], columns=[\n 'group_id',\n 'amount',\n 'min_amount',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_select(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n 
action_arguments=['group_id']\n )\n df_new = select(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n ),\n dict(\n group_id=2,\n ),\n ])\n\n def test_shift_down(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n prev_sold=1000,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n prev_sold=1050,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n prev_sold=1200,\n ),\n ])\n\n def test_shift_down_with_groupby(self):\n df = pd.DataFrame([\n [1, '2020-01-01', 1000],\n [1, '2020-01-02', 1050],\n [2, '2020-01-03', 1200],\n [1, '2020-01-04', 990],\n [2, '2020-01-05', 980],\n [2, '2020-01-06', 970],\n [2, '2020-01-07', 960],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n action_options=dict(\n groupby_columns=['group_id'],\n periods=2,\n ),\n outputs=[\n dict(uuid='prev_sold'),\n ],\n )\n df_new = shift_down(df, action)\n df_expected = pd.DataFrame([\n [1, '2020-01-01', 1000, None],\n [1, '2020-01-02', 1050, None],\n [2, '2020-01-03', 1200, None],\n [1, '2020-01-04', 990, 1000],\n [2, '2020-01-05', 980, None],\n [2, '2020-01-06', 970, 1200],\n [2, '2020-01-07', 960, 980],\n ], columns=[\n 'group_id',\n 'date',\n 'sold',\n 'prev_sold',\n ])\n assert_frame_equal(df_new, df_expected)\n", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n 
column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, 
df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n 
])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 
'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # 
column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = 
pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n 
[True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n 
uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # 
properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n 
boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition3',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(\n add_column(\n add_column(df, action1),\n action2,\n ),\n action3,\n )\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 11, 11, 20],\n [4, 2, 9, 3, 15, 14, 17],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_addition',\n 'integer_addition2',\n 'integer_addition3',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_addition_days(self):\n df = pd.DataFrame([\n ['2021-08-31'],\n ['2021-08-28'],\n ], columns=[\n 'created_at',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='addition',\n value=3,\n ),\n outputs=[\n dict(\n uuid='3d_after_creation',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-03 00:00:00'],\n ['2021-08-28', '2021-08-31 00:00:00'],\n ], columns=[\n 'created_at',\n '3d_after_creation'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_constant(self):\n df = pd.DataFrame([\n [False],\n [True],\n ], columns=[\n 'boolean',\n ])\n action = dict(\n action_arguments=[10],\n action_options=dict(\n udf='constant',\n ),\n 
outputs=[\n dict(\n uuid='integer',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n boolean=False,\n integer=10,\n ),\n dict(\n boolean=True,\n integer=10,\n ),\n ])\n\n def test_add_column_date_trunc(self):\n df = pd.DataFrame([\n ['2021-08-31', False],\n ['2021-08-28', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='date_trunc',\n date_part='week',\n ),\n outputs=[\n dict(\n uuid='week_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2021-08-31',\n boolean=False,\n week_date='2021-08-30',\n ),\n dict(\n created_at='2021-08-28',\n boolean=True,\n week_date='2021-08-23',\n ),\n ])\n\n def test_add_column_difference(self):\n df = pd.DataFrame([\n [1, 3],\n [4, 2],\n ], columns=[\n 'integer1',\n 'integer2',\n ])\n action1 = dict(\n action_arguments=['integer1', 'integer2'],\n action_options={\n 'udf': 'difference',\n },\n outputs=[\n dict(\n uuid='integer_difference',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'difference',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_difference2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, -2, -9],\n [4, 2, 2, -6],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer_difference',\n 'integer_difference2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_difference_days(self):\n df = pd.DataFrame([\n ['2021-08-31', '2021-09-14'],\n ['2021-08-28', '2021-09-03'],\n ], columns=[\n 'created_at',\n 'converted_at',\n ])\n action = dict(\n action_arguments=['converted_at', 'created_at'],\n action_options=dict(\n column_type='datetime',\n time_unit='d',\n udf='difference',\n ),\n outputs=[\n dict(\n uuid='days_diff',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['2021-08-31', '2021-09-14', 14],\n ['2021-08-28', '2021-09-03', 6],\n ], columns=[\n 'created_at',\n 'converted_at',\n 'days_diff',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_distance_between(self):\n df = pd.DataFrame([\n [26.05308, -97.31838, 33.41939, -112.32606],\n [39.71954, -84.13056, 33.41939, -112.32606],\n ], columns=[\n 'lat1',\n 'lng1',\n 'lat2',\n 'lng2',\n ])\n action = dict(\n action_arguments=['lat1', 'lng1', 'lat2', 'lng2'],\n action_options=dict(\n udf='distance_between',\n ),\n outputs=[\n dict(\n uuid='distance',\n column_type='number_with_decimals',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n lat1=26.05308,\n lng1=-97.31838,\n lat2=33.41939,\n lng2=-112.32606,\n distance=1661.8978520305657,\n ),\n dict(\n lat1=39.71954,\n lng1=-84.13056,\n lat2=33.41939,\n lng2=-112.32606,\n distance=2601.5452571116184,\n ),\n ])\n\n def test_add_column_divide(self):\n df = pd.DataFrame([\n [12, 3, 70, 9],\n [4, 2, 90, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'divide',\n },\n outputs=[\n dict(\n uuid='integer_divide',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 
'divide',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_divide2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [12, 3, 70, 9, 4, 7],\n [4, 2, 90, 3, 2, 9],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_divide',\n 'integer_divide2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n # def test_add_column_extract_dict_string(self):\n # df = pd.DataFrame([\n # '{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # '{\\'country\\': \\'CA\\'}',\n # '{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'age'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_age',\n # column_type='number',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\'country\\': \\'US\\', \\'age\\': \\'20\\'}',\n # property_age=20,\n # ),\n # dict(\n # properties='{\\'country\\': \\'CA\\'}',\n # property_age=0,\n # ),\n # dict(\n # properties='{\\'country\\': \\'UK\\', \\'age\\': \\'24\\'}',\n # property_age=24,\n # ),\n # dict(\n # properties='',\n # property_age=0,\n # ),\n # ])\n\n # def test_add_column_extract_dict_string_with_json(self):\n # df = pd.DataFrame([\n # '{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # '{\\\"country\\\": \\\"CA\\\"}',\n # '{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # '',\n # ], columns=[\n # 'properties',\n # ])\n # action = dict(\n # action_arguments=['properties', 'country'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_country',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = add_column(df, action)\n # self.assertEqual(df_new.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_country='US',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"CA\\\"}',\n # property_country='CA',\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_country='UK',\n # ),\n # dict(\n # properties='',\n # property_country=np.NaN,\n # ),\n # ])\n # action2 = dict(\n # action_arguments=['properties', 'is_adult'],\n # action_options=dict(\n # udf='extract_dict_value',\n # ),\n # outputs=[\n # dict(\n # uuid='property_is_adult',\n # column_type='true_or_false',\n # ),\n # ],\n # )\n # df_new2 = add_column(df, action2)\n # self.assertEqual(df_new2.to_dict(orient='records'), [\n # dict(\n # properties='{\\\"country\\\": \\\"US\\\", \\\"is_adult\\\": true}',\n # property_is_adult=True,\n # ),\n # dict(\n # properties='{\\\"country\\\": 
\\\"CA\\\"}',\n # property_is_adult=None,\n # ),\n # dict(\n # properties='{\\\"country\\\": \\\"UK\\\", \\\"is_adult\\\": false}',\n # property_is_adult=False,\n # ),\n # dict(\n # properties='',\n # property_is_adult=None,\n # ),\n # ])\n\n def test_add_column_formatted_date(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', False],\n ['2019-03-05 03:30:30', True],\n ], columns=[\n 'created_at',\n 'boolean',\n ])\n action = dict(\n action_arguments=['created_at'],\n action_options=dict(\n udf='formatted_date',\n format='%Y-%m-%d',\n ),\n outputs=[\n dict(\n uuid='created_date',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n created_at='2019-04-10 08:20:58',\n boolean=False,\n created_date='2019-04-10',\n ),\n dict(\n created_at='2019-03-05 03:30:30',\n boolean=True,\n created_date='2019-03-05',\n ),\n ])\n\n def test_add_column_if_else(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58'],\n [None],\n ], columns=[\n 'converted_at'\n ])\n action = dict(\n action_arguments=[False, True],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n ),\n outputs=[\n dict(\n uuid='converted',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n converted=True,\n ),\n dict(\n converted_at=None,\n converted=False,\n ),\n ])\n\n def test_add_column_if_else_with_column(self):\n df = pd.DataFrame([\n ['2019-04-10 08:20:58', 'test_user_id'],\n [None, None],\n ], columns=[\n 'converted_at',\n 'user_id',\n ])\n action = dict(\n action_arguments=['unknown', 'user_id'],\n action_code='converted_at == null',\n action_options=dict(\n udf='if_else',\n arg1_type='value',\n arg2_type='column',\n ),\n outputs=[\n dict(\n uuid='user_id_clean',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action, original_df=df)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n converted_at='2019-04-10 08:20:58',\n user_id='test_user_id',\n user_id_clean='test_user_id',\n ),\n dict(\n converted_at=None,\n user_id=None,\n user_id_clean='unknown',\n ),\n ])\n\n def test_add_column_multiply(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n ],\n action_options={\n 'udf': 'multiply',\n },\n outputs=[\n dict(\n uuid='integer_multiply',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer3'],\n action_options={\n 'udf': 'multiply',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_multiply2',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action1), action2)\n df_expected = pd.DataFrame([\n [1, 3, 7, 9, 3, 70],\n [4, 2, 9, 3, 8, 90],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n 'integer_multiply',\n 'integer_multiply2'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_replace(self):\n df = pd.DataFrame([\n ['$1000'],\n ['$321. 
'],\n ['$4,321'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'string_replace',\n 'pattern': '\\\\$|\\\\.|\\\\,|\\\\s*',\n 'replacement': '',\n },\n outputs=[\n dict(\n uuid='amount_clean',\n column_type='true_or_false',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000', '1000'],\n ['$321. ', '321'],\n ['$4,321', '4321'],\n ], columns=[\n 'amount',\n 'amount_clean',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_string_split(self):\n df = pd.DataFrame([\n ['Street1, Long Beach, CA, '],\n ['Street2,Vernon, CA, 123'],\n ['Pacific Coast Highway, Los Angeles, CA, 111'],\n ], columns=[\n 'location',\n ])\n action = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 1,\n },\n outputs=[\n dict(\n uuid='location_city',\n column_type='text',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['location'],\n action_options={\n 'udf': 'string_split',\n 'separator': ',',\n 'part_index': 3,\n },\n outputs=[\n dict(\n uuid='num',\n column_type='number',\n ),\n ],\n )\n df_new = add_column(add_column(df, action), action2)\n df_expected = pd.DataFrame([\n ['Street1, Long Beach, CA, ', 'Long Beach', 0],\n ['Street2,Vernon, CA, 123', 'Vernon', 123],\n ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111],\n ], columns=[\n 'location',\n 'location_city',\n 'num',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_add_column_substring(self):\n df = pd.DataFrame([\n ['$1000.0'],\n ['$321.9'],\n ], columns=[\n 'amount',\n ])\n action = dict(\n action_arguments=['amount'],\n action_options={\n 'udf': 'substring',\n 'start': 1,\n 'stop': -2,\n },\n outputs=[\n dict(\n uuid='amount_int',\n column_type='text',\n ),\n ],\n )\n df_new = add_column(df, action)\n df_expected = pd.DataFrame([\n ['$1000.0', '1000'],\n ['$321.9', '321'],\n ], columns=[\n 'amount',\n 'amount_int',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_average(self):\n from data_cleaner.transformer_actions.column import average\n action = self.__groupby_agg_action('average_amount')\n df_new = average(TEST_DATAFRAME.copy(), action)\n df_expected = pd.DataFrame([\n [1, 1000, 1050],\n [2, 1050, 1100],\n [1, 1100, 1050],\n [2, 1150, 1100],\n ], columns=[\n 'group_id',\n 'amount',\n 'average_amount'\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_count(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1050,\n order_count=3,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=3,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_distinct(self):\n df = pd.DataFrame([\n [1, 1000],\n [1, 1000],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count_distinct(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n 
order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1000,\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n order_count=1,\n ),\n ])\n\n def test_count_with_time_window(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='',\n action_options=dict(\n groupby_columns=['group_id'],\n timestamp_feature_a='group_churned_at',\n timestamp_feature_b='order_created_at',\n window=90*24*3600,\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n group_churned_at='2021-10-01',\n order_created_at='2021-09-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1050,\n group_churned_at='2021-10-01',\n order_created_at='2021-08-01',\n order_count=2,\n ),\n dict(\n group_id=1,\n order_id=1100,\n group_churned_at='2021-10-01',\n order_created_at='2021-01-01',\n order_count=2,\n ),\n dict(\n group_id=2,\n order_id=1150,\n group_churned_at='2021-09-01',\n order_created_at='2021-08-01',\n order_count=1,\n ),\n ])\n\n def test_count_with_filter(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n [2, 1200, '2021-09-01', '2021-08-16'],\n [2, 1250, '2021-09-01', '2021-08-14'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_code='order_created_at < \\'2021-08-15\\'',\n action_options=dict(\n groupby_columns=['group_id'],\n ),\n outputs=[\n dict(uuid='order_count'),\n ],\n )\n df_new = count(df, action)\n df_expected = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01', 2],\n [1, 1050, '2021-10-01', '2021-08-01', 2],\n [1, 1100, '2021-10-01', '2021-01-01', 2],\n [2, 1150, '2021-09-01', '2021-08-01', 2],\n [2, 1200, '2021-09-01', '2021-08-16', 2],\n [2, 1250, '2021-09-01', '2021-08-14', 2],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n 'order_count',\n ])\n assert_frame_equal(df_new, df_expected)\n\n def test_diff(self):\n df = pd.DataFrame([\n ['2020-01-01', 1000],\n ['2020-01-02', 1050],\n ['2020-01-03', 1200],\n ['2020-01-04', 990],\n ], columns=[\n 'date',\n 'sold',\n ])\n action = dict(\n action_arguments=['sold'],\n outputs=[\n dict(uuid='sold_diff'),\n ],\n )\n df_new = diff(df, action)\n self.assertEqual(df_new.to_dict(orient='records')[1:], [\n dict(\n date='2020-01-02',\n sold=1050,\n sold_diff=50,\n ),\n dict(\n date='2020-01-03',\n sold=1200,\n sold_diff=150,\n ),\n dict(\n date='2020-01-04',\n sold=990,\n sold_diff=-210,\n ),\n ])\n\n # def test_expand_column(self):\n # df = pd.DataFrame([\n # [1, 'game'],\n # [1, 'book'],\n # [1, 'game'],\n # [2, 'Video Game'],\n # [1, 'Video Game'],\n # [2, 'book'],\n # [1, 'Video Game'],\n # [2, 'Video Game'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id']\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game'),\n # 
dict(uuid='category_expanded_count_book'),\n # dict(uuid='category_expanded_count_video_game'),\n # dict(uuid='category_expanded_count_clothing'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', 2, 1, 2],\n # [1, 'book', 2, 1, 2],\n # [1, 'game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'book', 0, 1, 2],\n # [1, 'Video Game', 2, 1, 2],\n # [2, 'Video Game', 0, 1, 2],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'category_expanded_count_game',\n # 'category_expanded_count_book',\n # 'category_expanded_count_video_game',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n # def test_expand_column_with_time_window(self):\n # df = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04'],\n # [1, 'book', '2021-01-02', '2021-01-04'],\n # [1, 'game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2021-01-01', '2021-01-03'],\n # [1, 'Video Game', '2021-01-01', '2021-01-04'],\n # [2, 'book', '2021-01-02', '2021-01-03'],\n # [1, 'Video Game', '2021-01-03', '2021-01-04'],\n # [2, 'Video Game', '2020-12-30', '2021-01-03'],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # ])\n # action = dict(\n # action_arguments=['category'],\n # action_options=dict(\n # groupby_columns=['group_id'],\n # timestamp_feature_a='timestamp2',\n # timestamp_feature_b='timestamp1',\n # window=172800,\n # ),\n # outputs=[\n # dict(uuid='category_expanded_count_game_2d'),\n # dict(uuid='category_expanded_count_book_2d'),\n # dict(uuid='category_expanded_count_video_game_2d'),\n # dict(uuid='category_expanded_count_clothing_2d'),\n # ],\n # )\n # df_new = expand_column(df, action)\n # df_expected = pd.DataFrame([\n # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1],\n # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1],\n # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1],\n # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1],\n # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1],\n # ], columns=[\n # 'group_id',\n # 'category',\n # 'timestamp1',\n # 'timestamp2',\n # 'category_expanded_count_game_2d',\n # 'category_expanded_count_book_2d',\n # 'category_expanded_count_video_game_2d',\n # ])\n # assert_frame_equal(df_new, df_expected)\n\n def test_first_column(self):\n df = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n ], columns=[\n 'group_id',\n 'order_id',\n ])\n action = dict(\n action_arguments=['order_id'],\n action_options=dict(\n groupby_columns=['group_id']\n ),\n outputs=[\n dict(uuid='first_order'),\n ],\n )\n df_new = first(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n group_id=1,\n order_id=1000,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1050,\n first_order=1050,\n ),\n dict(\n group_id=1,\n order_id=1100,\n first_order=1000,\n ),\n dict(\n group_id=2,\n order_id=1150,\n first_order=1050,\n ),\n ])\n\n def test_impute(self):\n from data_cleaner.transformer_actions.column import impute\n df = pd.DataFrame([\n ['2020-01-01', 1000, ' ', 800],\n ['2020-01-02', '', 1200, 700],\n ['2020-01-03', 1200, np.NaN, 900],\n ['2020-01-04', np.NaN, ' ', 700],\n ['2020-01-05', 1700, 1300, 800],\n ], columns=[\n 'date',\n 'sold',\n 'curr_profit',\n 'prev_sold',\n ])\n action1 = dict(\n action_arguments=['sold', 'curr_profit'],\n 
action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n '1': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'curr_profit',\n },\n 'type': 'feature',\n },\n },\n )\n action2 = dict(\n action_arguments=['sold'],\n action_options={\n 'value': '0',\n },\n action_variables={\n '0': {\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'sold',\n },\n 'type': 'feature',\n },\n },\n )\n action3 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'average',\n },\n )\n action4 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'median',\n },\n )\n action5 = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'column',\n 'value': 'prev_sold',\n },\n )\n action_invalid = dict(\n action_arguments=['sold', 'curr_profit'],\n action_options={\n 'strategy': 'mode',\n },\n )\n df_new1 = impute(df.copy(), action1)\n df_new2 = impute(df.copy(), action2)\n df_new3 = impute(df.copy(), action3)\n df_new4 = impute(df.copy(), action4)\n df_new5 = impute(df.copy(), action5)\n\n df_expected1 = pd.DataFrame([\n ['2020-01-01', 1000, 0, 800],\n ['2020-01-02', 0, 1200, 700],\n ['2020-01-03', 1200, 0, 900],", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.column import (\n add_column,\n count,\n count_distinct,\n diff,\n # expand_column,\n first,\n last,\n remove_column,\n select,\n shift_down,\n shift_up,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\nTEST_DATAFRAME = pd.DataFrame([\n [1, 1000],\n [2, 1050],\n [1, 1100],\n [2, 1150],\n], columns=[\n 'group_id',\n 'amount',\n])\n\n\nclass ColumnTests(TestCase):\n def test_remove_column(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n action = dict(action_arguments=['string'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n integer=0,\n boolean=False,\n ),\n dict(\n integer=1,\n boolean=True,\n ),\n ])\n\n action = dict(action_arguments=['integer', 'boolean'])\n\n df_new = remove_column(df, action)\n self.assertEqual(df_new.to_dict(orient='records'), [\n dict(\n string='a',\n ),\n dict(\n string='b',\n ),\n ])\n\n def test_add_column_addition(self):\n df = pd.DataFrame([\n [1, 3, 7, 9],\n [4, 2, 9, 3],\n ], columns=[\n 'integer1',\n 'integer2',\n 'integer3',\n 'integer4',\n ])\n action1 = dict(\n action_arguments=[\n 'integer1',\n 'integer2',\n 'integer3',\n ],\n action_options={\n 'udf': 'addition',\n 'value': None,\n },\n outputs=[\n dict(\n uuid='integer_addition',\n column_type='number',\n ),\n ],\n )\n action2 = dict(\n action_arguments=['integer1'],\n action_options={\n 'udf': 'addition',\n 'value': 10,\n },\n outputs=[\n dict(\n uuid='integer_addition2',\n column_type='number',\n ),\n ],\n )\n action3 = dict(\n action_arguments=['integer1', 'integer4'],\n action_options={\n 'udf': 'addition',", "type": "random" } ]
[ " action = self.__groupby_agg_action('total_amount')", " action = self.__groupby_agg_action('average_amount')", " action = self.__groupby_agg_action('min_amount')", " action = self.__groupby_agg_action('max_amount')", " action = self.__groupby_agg_action('median_amount')", " df_new = add_column(df, action, original_df=df)", " df_new = count_distinct(df, action)", " df_new = add_column(add_column(df, action), action2)", " select,", " shift_down,", " shift_up,", " df_new = add_column(add_column(df, action1), action2)", " df_new = diff(df, action)", " df_new2 = max(TEST_DATAFRAME.copy(), action2)", " df_new5 = impute(df.copy(), action5)", " df_new = first(df, action)", " df_new = shift_down(df, action)", " df_new = add_column(df, action)", " df_new = median(df, action)", " df_new = remove_column(df, action)", " df_new = count(df, action)", " df_new = average(TEST_DATAFRAME.copy(), action)", " df_new = shift_up(df, action)", " df_new = min(TEST_DATAFRAME.copy(), action)", " _ = impute(df.copy(), action_invalid)", " df_new = sum(TEST_DATAFRAME.copy(), action)", " df_new = select(df, action)", " add_column(", " df_new = last(df, action)", " add_column(df, action1),", " df_new1 = impute(df.copy(), action1)", " df_new = add_column(", " df_new2 = impute(df.copy(), action2)", " df_new3 = impute(df.copy(), action3)", " df_new4 = impute(df.copy(), action4)", " df_new = max(TEST_DATAFRAME.copy(), action)", " # df_new2 = add_column(df, action2)", " # ],", " def test_add_column_distance_between(self):", " def test_shift_up(self):", " # dict(", " user_id='test_user_id',", " action_arguments=['sold'],", " 'integer3',", " ['2020-01-04', 0, 0, 700],", " 'value': 10," ]
METASEP
20
mage-ai__mage-ai
mage-ai__mage-ai METASEP
cleaning/__init__.py METASEP
src/data_cleaner/transformer_actions/udf/substring.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class Substring(BaseUDF):
    def execute(self):
        start = self.options.get('start')
        stop = self.options.get('stop')
        if start is None and stop is None:
            raise Exception('Require at least one of `start` and `stop` parameters.')
        return self.df[self.arguments[0]].str.slice(start=start, stop=stop)
src/data_cleaner/transformer_actions/udf/string_split.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class StringSplit(BaseUDF):
    def execute(self):
        separator = self.options.get('separator')
        part_index = self.options.get('part_index')
        if separator is None or part_index is None:
            raise Exception('Require both `separator` and `part_index` parameters.')
        return self.df[self.arguments[0]].str.split(separator).str[part_index].str.strip()
src/data_cleaner/transformer_actions/udf/string_replace.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class StringReplace(BaseUDF):
    def execute(self):
        pattern = self.options.get('pattern')
        replacement = self.options.get('replacement')
        if not pattern and not replacement:
            raise Exception(f'Require both `pattern` and `replacement` parameters.')
        return self.df[self.arguments[0]].str.replace(pattern, replacement)
src/data_cleaner/transformer_actions/udf/multiply.py METASEP
from transformer_actions.udf.base import BaseUDF


class Multiply(BaseUDF):
    def execute(self):
        col1 = self.arguments[0]
        if len(self.arguments) > 1:
            col2 = self.arguments[1]
            return self.df[col1].astype(float) * self.df[col2].astype(float)
        elif self.options.get('value') is not None:
            return self.df[col1] * float(self.options['value'])
        raise Exception('Require second column or a value to multiply.')
src/data_cleaner/transformer_actions/udf/if_else.py METASEP
from data_cleaner.transformer_actions.action_code import query_with_action_code
from data_cleaner.transformer_actions.udf.base import BaseUDF


class IfElse(BaseUDF):
    def execute(self):
        df_copy = self.df.copy()
        true_index = query_with_action_code(df_copy, self.code, self.kwargs).index
        arg1_type = self.options.get('arg1_type', 'value')
        arg2_type = self.options.get('arg2_type', 'value')
        arg1 = self.arguments[0]
        if arg1_type == 'column':
            arg1 = df_copy[arg1]
        arg2 = self.arguments[1]
        if arg2_type == 'column':
            arg2 = df_copy[arg2]
        df_copy.loc[true_index, 'result'] = arg1
        df_copy['result'] = df_copy['result'].fillna(arg2)
        return df_copy['result']
src/data_cleaner/transformer_actions/udf/formatted_date.py METASEP
from transformer_actions.udf.base import BaseUDF

import pandas as pd


class FormattedDate(BaseUDF):
    def execute(self):
        return pd.to_datetime(
            self.df[self.arguments[0]],
        ).dt.strftime(self.options['format'])
src/data_cleaner/transformer_actions/udf/divide.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF


class Divide(BaseUDF):
    def execute(self):
        col1 = self.arguments[0]
        if len(self.arguments) > 1:
            col2 = self.arguments[1]
            return self.df[col1].astype(float) / self.df[col2].astype(float)
        elif self.options.get('value') is not None:
            return self.df[col1] / float(self.options['value'])
        raise Exception('Require second column or a value to divide.')
src/data_cleaner/transformer_actions/udf/distance_between.py METASEP
from data_cleaner.transformer_actions.udf.base import BaseUDF

import numpy as np

EARTH_RADIUS = 6371


class DistanceBetween(BaseUDF):
    def execute(self):
        def __haversine(lat1, lng1, lat2, lng2):
            lat1, lng1, lat2, lng2
= np.radians([lat1, lng1, lat2, lng2]) a = np.sin((lat2-lat1)/2.0)**2 + \ np.cos(lat1) * np.cos(lat2) * np.sin((lng2-lng1)/2.0)**2 return EARTH_RADIUS * 2 * np.arcsin(np.sqrt(a)) return __haversine( self.df[self.arguments[0]], self.df[self.arguments[1]], self.df[self.arguments[2]], self.df[self.arguments[3]], ) src/data_cleaner/transformer_actions/udf/difference.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class Difference(BaseUDF): def execute(self): col1 = self.arguments[0] column_type = self.options.get('column_type', self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) > 1: col2 = self.arguments[1] return self.__difference_between_columns( self.df[col1], self.df[col2], column_type=column_type, options=self.options, ) elif self.options.get('value') is not None: return self.__subtract_value( self.df[col1], self.options['value'], column_type=column_type, options=self.options, ) raise Exception('Require second column or a value to minus.') def __difference_between_columns(self, column1, column2, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return (pd.to_datetime(column1, utc=True) - pd.to_datetime(column2, utc=True)).dt.days return column1 - column2 def __subtract_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) - pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column - value src/data_cleaner/transformer_actions/udf/date_trunc.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class DateTrunc(BaseUDF): def execute(self): date_part = self.options['date_part'] date_column = self.arguments[0] df_copy = self.df.copy() df_copy[date_column] = pd.to_datetime(df_copy[date_column]) if date_part == 'week': return (df_copy[date_column] - df_copy[date_column].dt.weekday * np.timedelta64(1, 'D')).\ dt.strftime('%Y-%m-%d') raise Exception(f'Date part {date_part} is not supported.') src/data_cleaner/transformer_actions/udf/constant.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Constant(BaseUDF): def execute(self): return self.arguments[0] src/data_cleaner/transformer_actions/udf/base.py METASEP import importlib class BaseUDF(): def __init__(self, df, arguments=[], code=None, options={}, kwargs={}): self.df = df self.arguments = arguments self.code = code self.options = options self.kwargs = kwargs def execute(self): pass def execute_udf(udf_name, df, arguments, code, options, kwargs): udf_class = getattr( importlib.import_module(f'transformer_actions.udf.{udf_name}'), udf_name.title().replace('_', ''), ) return udf_class(df, arguments, code, options, kwargs).execute() src/data_cleaner/transformer_actions/udf/addition.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class Addition(BaseUDF): def execute(self): col1 = self.arguments[0] df_result = self.df[col1] column_type = self.options.get("column_type", self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) == 1 and 'value' not in self.options: raise Exception('Require second column or a value to add.') if len(self.arguments) > 1: for col in self.arguments[1:]: df_result = 
df_result + self.df[col] if self.options.get('value') is not None: df_result = self.__add_value( df_result, self.options['value'], column_type=column_type, options=self.options, ) return df_result def __add_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) + pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column + value src/data_cleaner/transformer_actions/udf/__init__.py METASEP src/server/hello.py METASEP from flask import Flask app = Flask(__name__) @app.route("/") def hello_world(): return "<p>Hello, World!</p>" src/data_cleaner/transformer_actions/variable_replacer.py METASEP from data_cleaner.transformer_actions.constants import VariableType import re def interpolate(text, key, variable_data): """ text: string to operate on key: key to search within text variable_data: dictionary containing data used to interpolate """ regex_replacement = key if variable_data['type'] == VariableType.FEATURE: regex_replacement = variable_data[VariableType.FEATURE]['uuid'] elif variable_data['type'] == VariableType.FEATURE_SET_VERSION: regex_replacement = \ variable_data[VariableType.FEATURE_SET_VERSION][VariableType.FEATURE_SET]['uuid'] regex_pattern = re.compile( '\%__BRACKETS_START__{}__BRACKETS_END__' .format(key) .replace('__BRACKETS_START__', '\{') .replace('__BRACKETS_END__', '\}') ) return re.sub(regex_pattern, regex_replacement, str(text)) def replace_true_false(action_code): regex_pattern_true = re.compile(' true') regex_pattern_false = re.compile(' false') return re.sub( regex_pattern_true, ' True', re.sub(regex_pattern_false, ' False', action_code), ) src/data_cleaner/transformer_actions/utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis def columns_to_remove(transformer_actions): arr = filter( lambda x: x['action_type'] == ActionType.REMOVE and x['axis'] == Axis.COLUMN, transformer_actions, ) columns = [] for transformer_action in arr: columns += transformer_action['action_arguments'] return columns src/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) src/data_cleaner/transformer_actions/row.py METASEP from 
data_cleaner.column_type_detector import NUMBER_TYPES from data_cleaner.transformer_actions.constants import VariableType from data_cleaner.transformer_actions.action_code import query_with_action_code import pandas as pd def drop_duplicates(df, action, **kwargs): keep = action.get('action_options', {}).get('keep', 'last') return df.drop_duplicates(subset=action['action_arguments'], keep=keep) def filter_rows(df, action, **kwargs): """ df: Pandas DataFrame action: TransformerAction serialized into a dictionary """ action_code = action['action_code'] return query_with_action_code(df, action_code, kwargs) def sort_rows(df, action, **kwargs): ascending = action.get('action_options', {}).get('ascending', True) ascendings = action.get('action_options', {}).get('ascendings', []) if len(ascendings) > 0: ascending = ascendings[0] feature_by_uuid = {} if action.get('action_variables'): for _, val in action['action_variables'].items(): feature = val.get('feature') if feature: feature_by_uuid[feature['uuid']] = feature na_indexes = None as_types = {} for idx, uuid in enumerate(action['action_arguments']): feature = feature_by_uuid.get(uuid) if feature and feature['column_type'] in NUMBER_TYPES: as_types[uuid] = float if idx == 0: na_indexes = df[(df[uuid].isnull()) | (df[uuid].astype(str).str.len() == 0)].index bad_df = None if na_indexes is not None: bad_df = df.index.isin(na_indexes) index = (df[~bad_df] if bad_df is not None else df).astype(as_types).sort_values( by=action['action_arguments'], ascending=ascendings if len(ascendings) > 0 else ascending, ).index df_final = df.loc[index] if bad_df is not None: if ascending: return pd.concat([ df.iloc[bad_df], df_final, ]) return pd.concat([ df_final, df.iloc[bad_df], ]) return df_final src/data_cleaner/transformer_actions/helpers.py METASEP from data_cleaner.column_type_detector import NUMBER, NUMBER_WITH_DECIMALS, TEXT from data_cleaner.transformer_actions.constants import ActionType, Operator, VariableType import numpy as np import re DAY_SECONDS = 86400 HOUR_SECONDS = 3600 def convert_col_type(df_col, col_type): if col_type == NUMBER: return df_col.replace(r'^\s*$', 0, regex=True).fillna(0).astype(np.int64) elif col_type == NUMBER_WITH_DECIMALS: return df_col.dropna().astype(float) elif col_type == TEXT: return df_col.dropna().astype(str) return df_col def convert_value_type(feature_uuid, action, value): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break if column_type == NUMBER: value = int(value) elif column_type == NUMBER_WITH_DECIMALS: value = float(value) return value def drop_na(df): return df.replace(r'^\s*$', np.nan, regex=True).dropna() def extract_join_feature_set_version_id(payload): if payload['action_type'] != ActionType.JOIN: return None join_feature_set_version_id = payload['action_arguments'][0] if type(join_feature_set_version_id) == str and \ join_feature_set_version_id.startswith('%{'): join_feature_set_version_id = next( v['id'] for v in payload['action_variables'].values() if v['type'] == VariableType.FEATURE_SET_VERSION ) return join_feature_set_version_id def get_column_type(feature_uuid, action): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break return column_type def 
get_time_window_str(window_in_seconds): if window_in_seconds is None: return None if window_in_seconds >= DAY_SECONDS: time_window = f'{int(window_in_seconds / DAY_SECONDS)}d' elif window_in_seconds >= HOUR_SECONDS: time_window = f'{int(window_in_seconds / HOUR_SECONDS)}h' else: time_window = f'{window_in_seconds}s' return time_window src/data_cleaner/transformer_actions/constants.py METASEP class ActionType(): ADD = 'add' AVERAGE = 'average' COUNT = 'count' COUNT_DISTINCT = 'count_distinct' DIFF = 'diff' DROP_DUPLICATE = 'drop_duplicate' EXPAND_COLUMN = 'expand_column' EXPLODE = 'explode' FILTER = 'filter' FIRST = 'first' GROUP = 'group' IMPUTE = 'impute' JOIN = 'join' LAST = 'last' LIMIT = 'limit' MAX = 'max' MEDIAN = 'median' MIN = 'min' MODE = 'mode' REMOVE = 'remove' SCALE = 'scale' SELECT = 'select' SHIFT_DOWN = 'shift_down' SHIFT_UP = 'shift_up' SORT = 'sort' SUM = 'sum' UNION = 'union' UPDATE_TYPE = 'update_type' UPDATE_VALUE = 'update_value' class Axis(): COLUMN = 'column' ROW = 'row' class VariableType(): FEATURE = 'feature' FEATURE_SET = 'feature_set' FEATURE_SET_VERSION = 'feature_set_version' class Operator(): CONTAINS = 'contains' NOT_CONTAINS = 'not contains' EQUALS = '==' NOT_EQUALS = '!=' GREATER_THAN = '>' GREATER_THAN_OR_EQUALS = '>=' LESS_THAN = '<' LESS_THAN_OR_EQUALS = '<=' src/data_cleaner/transformer_actions/column.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.helpers import ( convert_col_type, get_column_type, get_time_window_str, ) from data_cleaner.transformer_actions.udf.base import execute_udf import pandas as pd import numpy as np def add_column(df, action, **kwargs): col = action['outputs'][0]['uuid'] col_type = action['outputs'][0]['column_type'] udf = action['action_options'].get('udf') if udf is None: return df df_copy = df.copy() df_copy[col] = execute_udf( udf, df, action.get('action_arguments'), action.get('action_code'), action.get('action_options'), kwargs, ) df_copy[col] = convert_col_type(df_copy[col], col_type) return df_copy def average(df, action, **kwargs): return __agg(df, action, 'mean') def count(df, action, **kwargs): return __groupby_agg(df, action, 'count') def count_distinct(df, action, **kwargs): return __groupby_agg(df, action, 'nunique') def diff(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].diff() return df def first(df, action, **kwargs): return __agg(df, action, 'first') def impute(df, action, **kwargs): columns = action['action_arguments'] action_options = action['action_options'] strategy = action_options.get('strategy') value = action_options.get('value') empty_string_pattern = r'^\s*$' df[columns] = df[columns].replace(empty_string_pattern, np.nan, regex=True) if strategy == 'average': df[columns] = df[columns].fillna(df[columns].astype(float).mean(axis=0)) elif strategy == 'median': df[columns] = df[columns].fillna(df[columns].astype(float).median(axis=0)) elif strategy == 'column': replacement_df = pd.DataFrame({col: df[value] for col in columns}) df[columns] = df[columns].fillna(replacement_df) elif value is not None: df[columns] = df[columns].fillna(value) else: raise Exception('Require a valid strategy or value') for col in columns: col_type = get_column_type(col, action) df[col] = convert_col_type(df[col], col_type) return df def max(df, action, **kwargs): return __agg(df, action, 'max') def median(df, action, **kwargs): return __agg(df, action, 'median') def min(df, 
action, **kwargs): return __agg(df, action, 'min') def remove_column(df, action, **kwargs): cols = action['action_arguments'] original_columns = df.columns drop_columns = [col for col in cols if col in original_columns] return df.drop(columns=drop_columns) def last(df, action, **kwargs): return __agg(df, action, 'last') def select(df, action, **kwargs): return df[action['action_arguments']] def shift_down(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] action_options = action.get('action_options', {}) groupby_columns = action_options.get('groupby_columns') periods = action_options.get('periods', 1) if groupby_columns is not None: df[output_col] = df.groupby(groupby_columns)[action['action_arguments'][0]].shift(periods) else: df[output_col] = df[action['action_arguments'][0]].shift(periods) return df def shift_up(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].shift(-1) return df def sum(df, action, **kwargs): return __agg(df, action, 'sum') def __agg(df, action, agg_method): if action['action_options'].get('groupby_columns'): return __groupby_agg(df, action, agg_method) else: output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].agg(agg_method) return df def __column_mapping(action): return dict(zip(action['action_arguments'], [o['uuid'] for o in action['outputs']])) # Filter by timestamp_feature_a - window <= timestamp_feature_b <= timestamp_feature_a def __filter_df_with_time_window(df, action): action_options = action['action_options'] time_window_keys = ['timestamp_feature_a', 'timestamp_feature_b', 'window'] if all(k in action_options for k in time_window_keys): window_in_seconds = action_options['window'] df_time_diff = \ (pd.to_datetime(df[action_options['timestamp_feature_a']], utc=True) - \ pd.to_datetime(df[action_options['timestamp_feature_b']], utc=True)).dt.total_seconds() if window_in_seconds > 0: df_time_diff_filtered = \ df_time_diff[(df_time_diff <= window_in_seconds) & (df_time_diff >= 0)] else: df_time_diff_filtered = \ df_time_diff[(df_time_diff >= window_in_seconds) & (df_time_diff <= 0)] df_filtered = df.loc[df_time_diff_filtered.index] time_window = get_time_window_str(window_in_seconds) else: df_filtered = df time_window = None return df_filtered, time_window def __groupby_agg(df, action, agg_method): df_filtered, _ = __filter_df_with_time_window(df, action) action_code = action.get('action_code') if action_code is not None and action_code != '': df_filtered = query_with_action_code(df_filtered, action_code, { 'original_df': df_filtered, }) action_options = action['action_options'] df_agg = df_filtered.groupby( action_options['groupby_columns'], )[action['action_arguments']].agg(agg_method) return df.merge( df_agg.rename(columns=__column_mapping(action)), on=action_options['groupby_columns'], how='left', ) src/data_cleaner/transformer_actions/base.py METASEP from data_cleaner.transformer_actions import column, row from data_cleaner.transformer_actions.constants import ActionType, Axis, VariableType from data_cleaner.transformer_actions.helpers import drop_na from data_cleaner.transformer_actions.variable_replacer import ( interpolate, replace_true_false, ) # from pipelines.column_type_pipelines import COLUMN_TYPE_PIPELINE_MAPPING import json COLUMN_TYPE_PIPELINE_MAPPING = {} FUNCTION_MAPPING = { Axis.COLUMN: { ActionType.ADD: column.add_column, ActionType.AVERAGE: column.average, ActionType.COUNT: column.count, ActionType.COUNT_DISTINCT: 
column.count_distinct, ActionType.DIFF: column.diff, # ActionType.EXPAND_COLUMN: column.expand_column, ActionType.FIRST: column.first, ActionType.IMPUTE: column.impute, ActionType.LAST: column.last, ActionType.MAX: column.max, ActionType.MEDIAN: column.median, ActionType.MIN: column.min, ActionType.REMOVE: column.remove_column, ActionType.SELECT: column.select, ActionType.SHIFT_DOWN: column.shift_down, ActionType.SHIFT_UP: column.shift_up, ActionType.SUM: column.sum, }, Axis.ROW: { ActionType.DROP_DUPLICATE: row.drop_duplicates, # ActionType.EXPLODE: row.explode, ActionType.FILTER: row.filter_rows, ActionType.SORT: row.sort_rows, }, } class BaseAction(): def __init__(self, action): self.action = action self.columns_by_type = {} for variable_data in self.action.get('action_variables', {}).values(): if not variable_data: continue feature = variable_data.get(VariableType.FEATURE) if not feature: continue column_type = feature.get('column_type') if not self.columns_by_type.get(column_type): self.columns_by_type[column_type] = [] self.columns_by_type[column_type].append(feature['uuid']) @property def action_type(self): return self.action['action_type'] @property def axis(self): return self.action['axis'] def execute(self, df, **kwargs): self.hydrate_action() self.action['action_code'] = replace_true_false(self.action['action_code']) if df.empty: return df if self.action_type in [ActionType.FILTER, ActionType.ADD]: df_transformed = self.transform(df) else: df_transformed = df if self.action_type == ActionType.GROUP: df_output = self.groupby(df, self.action) elif self.action_type == ActionType.JOIN: df_to_join = kwargs.get('df_to_join') df_output = self.join(df, df_to_join, self.action) else: column_types = {} for column_type, cols in self.columns_by_type.items(): for col in cols: column_types[col] = column_type df_output = FUNCTION_MAPPING[self.axis][self.action_type]( df_transformed, self.action, column_types=column_types, original_df=df, ) if self.action_type == ActionType.FILTER: return df.loc[df_output.index][df_output.columns] elif self.action_type == ActionType.ADD: output_cols = [f['uuid'] for f in self.action['outputs']] df[output_cols] = df_output[output_cols] return df else: return df_output def groupby(self, df, action): def __transform_partition(pdf, actions): for action in actions: pdf = BaseAction(action).execute(pdf) return pdf groupby_columns = action['action_arguments'] return df.groupby(groupby_columns).apply(lambda x: __transform_partition(x, action['child_actions'])) def hydrate_action(self): for k, v in self.action['action_variables'].items(): """ k: 1, 1_1 v: { 'type': 'feature', 'id': 1, 'feature': { 'uuid': 'mage', }, } """ if not v: continue if self.action.get('action_code'): self.action['action_code'] = interpolate(self.action['action_code'], k, v) if self.action.get('action_arguments'): self.action['action_arguments'] = [interpolate( args_text, k, v, ) for args_text in self.action['action_arguments']] if self.action.get('action_options'): action_options_json = json.dumps(self.action['action_options']) self.action['action_options'] = json.loads(interpolate(action_options_json, k, v)) def join(self, df, df_to_join, action): action_options = action['action_options'] left_on = action_options['left_on'] right_on = action_options['right_on'] for i in range(len(left_on)): col1, col2 = left_on[i], right_on[i] if df[col1].dtype != df_to_join[col2].dtype: df[col1] = drop_na(df[col1]).astype(str) df_to_join[col2] = drop_na(df_to_join[col2]).astype(str) if action.get('outputs') is 
not None: feature_rename_mapping = { f['source_feature']['uuid']:f['uuid'] for f in action['outputs'] if f.get('source_feature') is not None } df_to_join_renamed = df_to_join.rename(columns=feature_rename_mapping) right_on = [feature_rename_mapping.get(key, key) for key in right_on] else: df_to_join_renamed = df_to_join how = action_options.get('how', 'left') df_merged = df.merge(df_to_join_renamed, left_on=left_on, right_on=right_on, how=how) drop_columns = action_options.get('drop_columns', []) rename_columns = action_options.get('rename_columns', {}) return df_merged.drop(columns=drop_columns).rename(columns=rename_columns) def transform(self, df): df_copy = df.copy() current_columns = df_copy.columns for column_type, original_columns in self.columns_by_type.items(): cols = [col for col in original_columns if col in current_columns] if len(cols) == 0: continue build_pipeline = COLUMN_TYPE_PIPELINE_MAPPING.get(column_type) if not build_pipeline: continue df_copy[cols] = build_pipeline().fit_transform(df_copy[cols]) return df_copy src/data_cleaner/transformer_actions/action_code.py METASEP from data_cleaner.transformer_actions.constants import Operator import re ACTION_CODE_CONDITION_PATTERN = re.compile( r'([^\s()]+) ([!=<>]+|(?:contains)|(?:not contains)) ([^\s()]+)' ) ORIGINAL_COLUMN_PREFIX = 'orig_' TRANSFORMED_COLUMN_PREFIX = 'tf_' def __query_mutate_null_type(match, dtype): condition = [''] column_name, operator, _ = match.groups() column_name = f'{ORIGINAL_COLUMN_PREFIX}{column_name}' if operator == '==': condition.append(f'({column_name}.isna()') if dtype == bool: condition.append(f' | {column_name} == \'\'') elif dtype == str: condition.append(f' | {column_name}.str.len() == 0') condition.append(f')') else: condition.append(f'({column_name}.notna()') if dtype == bool: condition.append(f' & {column_name} != \'\'') elif dtype == str: condition.append(f' & {column_name}.str.len() >= 1') condition.append(f')') return ''.join(condition) def __query_mutate_contains_op(match): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' value = value.strip('\'').strip('\"') if operator == Operator.CONTAINS: condition = f'({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' else: condition = f'~({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' return condition def __query_mutate_default_case(match, column_set): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' if value in column_set: # if comparison is with another column, prefix value with column identifier value = f'{TRANSFORMED_COLUMN_PREFIX}{value}' return f'{column_name} {operator} {value}' def __get_column_type(df, cache, column_name): dtype = cache.get(column_name, None) if dtype is None: dropped_na = df[column_name].dropna() dropped_na = dropped_na[~dropped_na.isin([''])] dtype = type(dropped_na.iloc[0]) if len(dropped_na.index) >= 1 else object cache[column_name] = dtype return dtype def query_with_action_code(df, action_code, kwargs): transformed_types, original_types = {}, {} original_df, original_merged = kwargs.get('original_df', None), False reconstructed_code = [] queried_df = df.copy().add_prefix(TRANSFORMED_COLUMN_PREFIX) column_set = set(df.columns) prev_end = 0 for match in ACTION_CODE_CONDITION_PATTERN.finditer(action_code): column_name, operator, value = match.groups() reconstructed_code.append(action_code[prev_end: match.start()]) prev_end = match.end() if operator == Operator.CONTAINS or 
operator == Operator.NOT_CONTAINS: transformed_dtype = __get_column_type(df, transformed_types, column_name) if transformed_dtype != str: raise TypeError( f'\'{operator}\' can only be used on string columns, {transformed_dtype}' ) reconstructed_code.append(__query_mutate_contains_op(match)) elif (operator == Operator.EQUALS or operator == Operator.NOT_EQUALS) and value == 'null': if original_df is None: raise Exception( 'Null value queries require original dataframe as keyword argument' ) elif not original_merged: queried_df = queried_df.join(original_df.add_prefix(ORIGINAL_COLUMN_PREFIX)) original_merged = True original_dtype = __get_column_type(original_df, original_types, column_name) reconstructed_code.append(__query_mutate_null_type(match, original_dtype)) else: reconstructed_code.append(__query_mutate_default_case(match, column_set)) reconstructed_code.append(action_code[prev_end:]) action_code = ''.join(reconstructed_code) queried_df = queried_df.query(action_code).rename( lambda x: x[len(TRANSFORMED_COLUMN_PREFIX):], axis='columns' ) return queried_df[df.columns] src/data_cleaner/transformer_actions/__init__.py METASEP src/data_cleaner/statistics/calculator.py METASEP from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.column_type_detector import ( DATETIME, NUMBER, NUMBER_TYPES, NUMBER_WITH_DECIMALS, ) import math import numpy as np import pandas as pd import traceback VALUE_COUNT_LIMIT = 255 def increment(metric, tags): pass class timer(object): """ with timer('metric.metric', tags={ 'key': 'value' }): function() """ def __init__(self, metric, tags={}): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class StatisticsCalculator(): def __init__( self, # s3_client, # object_key_prefix, # feature_set_version, column_types, **kwargs, ): self.column_types = column_types @property def data_tags(self): return dict() def process(self, df): return self.calculate_statistics_overview(df) def calculate_statistics_overview(self, df): increment( 'lambda.transformer_actions.calculate_statistics_overview.start', self.data_tags, ) with timer( 'lambda.transformer_actions.calculate_statistics_overview.time', self.data_tags): data = dict(count=len(df.index)) arr_args_1 = [df[col] for col in df.columns], arr_args_2 = [col for col in df.columns], dicts = run_parallel(self.statistics_overview, arr_args_1, arr_args_2) for d in dicts: data.update(d) # object_key = s3_paths.path_statistics_overview(self.object_key_prefix) # s3_data.upload_json_sorted(self.s3_client, object_key, data) increment( 'lambda.transformer_actions.calculate_statistics_overview.success', self.data_tags, ) return data def statistics_overview(self, series, col): try: return self.__statistics_overview(series, col) except Exception as err: increment( 'lambda.transformer_actions.calculate_statistics_overview.column.failed', merge_dict(self.data_tags, { 'col': col, 'error': err.__class__.__name__, }), ) traceback.print_exc() return {} def __statistics_overview(self, series, col): # The following regex based replace has high overheads # series = series.replace(r'^\s*$', np.nan, regex=True) series_cleaned = series.map(lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan) df_value_counts = series_cleaned.value_counts(dropna=False) df = df_value_counts.reset_index() df.columns = [col, 'count'] df_top_value_counts = df if df.shape[0] > VALUE_COUNT_LIMIT: df_top_value_counts = df.head(VALUE_COUNT_LIMIT) # TODO: 
remove duplicate data for distinct values # object_key_distinct_values = s3_paths.path_distinct_values_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_distinct_values, columns=[col]) # object_key_statistics = s3_paths.path_statistics_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_statistics) # features = self.feature_set_version['features'] # feature = find(lambda x: x['uuid'] == col, features) # if feature and feature.get('transformed'): # return {} column_type = self.column_types.get(col) series_non_null = series_cleaned.dropna() if column_type == NUMBER: series_non_null = series_non_null.astype(float).astype(int) elif column_type == NUMBER_WITH_DECIMALS: series_non_null = series_non_null.astype(float) count_unique = len(df_value_counts.index) data = { f'{col}/count': series_non_null.size, f'{col}/count_distinct': count_unique - 1 if np.nan in df_value_counts else count_unique, f'{col}/null_value_rate': 0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, } if len(series_non_null) == 0: return data dates = None if column_type in NUMBER_TYPES: data[f'{col}/average'] = series_non_null.sum() / len(series_non_null) data[f'{col}/max'] = series_non_null.max() data[f'{col}/median'] = series_non_null.quantile(0.5) data[f'{col}/min'] = series_non_null.min() data[f'{col}/sum'] = series_non_null.sum() elif column_type == DATETIME: dates = pd.to_datetime(series_non_null, utc=True, errors='coerce').dropna() data[f'{col}/max'] = dates.max().isoformat() data[f'{col}/median'] = dates.sort_values().iloc[math.floor(len(dates) / 2)].isoformat() data[f'{col}/min'] = dates.min().isoformat() if column_type not in NUMBER_TYPES: if dates is not None: value_counts = dates.value_counts() else: value_counts = series_non_null.value_counts() mode = value_counts.index[0] if column_type == DATETIME: mode = mode.isoformat() data[f'{col}/mode'] = mode return data src/data_cleaner/statistics/__init__.py METASEP src/data_cleaner/shared/utils.py METASEP from data_cleaner.column_type_detector import ( NUMBER, NUMBER_WITH_DECIMALS, ) import numpy as np def clean_series(series, column_type, dropna=True): series_cleaned = series.map( lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan, ) if dropna: series_cleaned = series_cleaned.dropna() if column_type == NUMBER: try: series_cleaned = series_cleaned.astype(float).astype(int) except ValueError: series_cleaned = series_cleaned.astype(float) elif column_type == NUMBER_WITH_DECIMALS: series_cleaned = series_cleaned.astype(float) return series_cleaned src/data_cleaner/shared/multi.py METASEP from concurrent.futures import ThreadPoolExecutor from threading import Thread MAX_WORKERS = 16 def start_thread(target, **kwargs): thread = Thread( target=target, kwargs=kwargs, ) thread.start() return thread def parallelize(func, arr): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, arr) def parallelize_multiple_args(func, arr_args): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *zip(*arr_args)) def run_parallel_threads(list_of_funcs_and_args_or_kwargs): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: for func, args in list_of_funcs_and_args_or_kwargs: pool.submit(func, *args) def run_parallel(func, arr_args_1, arr_args_2): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *arr_args_1, 
*arr_args_2) src/data_cleaner/shared/hash.py METASEP from functools import reduce import math import re def dig(obj_arg, arr_or_string): if type(arr_or_string) is str: arr_or_string = arr_or_string.split('.') arr = list(map(str.strip, arr_or_string)) def _build(obj, key): tup = re.split(r'\[(\d+)\]$', key) if len(tup) >= 2: key, index = filter(lambda x: x, tup) if key and index: return obj[key][int(index)] elif index: return obj[int(index)] elif obj: return obj.get(key) else: return obj return reduce(_build, arr, obj_arg) def flatten(input_data): final_data = {} for k1, v1 in input_data.items(): if type(v1) is dict: for k2, v2 in v1.items(): if type(v2) is dict: for k3, v3 in v2.items(): final_data[f'{k1}_{k2}_{k3}'] = v3 else: final_data[f'{k1}_{k2}'] = v2 else: final_data[k1] = v1 return final_data def ignore_keys(d, keys): d_keys = d.keys() d2 = d.copy() for key in keys: if key in d_keys: d2.pop(key) return d2 def ignore_keys_with_blank_values(d): d2 = d.copy() for key, value in d.items(): if not value: d2.pop(key) return d2 def extract(d, keys): def _build(obj, key): val = d.get(key, None) if val is not None: obj[key] = val return obj return reduce(_build, keys, {}) def extract_arrays(input_data): arr = [] for k, v in input_data.items(): if type(v) is list: arr.append(v) return arr def group_by(func, arr): def _build(obj, item): val = func(item) if not obj.get(val): obj[val] = [] obj[val].append(item) return obj return reduce(_build, arr, {}) def index_by(func, arr): obj = {} for item in arr: key = func(item) obj[key] = item return obj def merge_dict(a, b): c = a.copy() c.update(b) return c def replace_dict_nan_value(d): def _replace_nan_value(v): if type(v) == float and math.isnan(v): return None return v return {k: _replace_nan_value(v) for k, v in d.items()} src/data_cleaner/shared/array.py METASEP import random def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def difference(li1, li2): li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2] return li_dif def flatten(arr): return [item for sublist in arr for item in sublist] def find(condition, arr, map=None): try: return next(map(x) if map else x for x in arr if condition(x)) except StopIteration: return None def sample(arr): return arr[random.randrange(0, len(arr))] def subtract(arr1, arr2): return [i for i in arr1 if i not in arr2] src/data_cleaner/shared/__init__.py METASEP src/data_cleaner/pipelines/base.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from data_cleaner.transformer_actions.base import BaseAction DEFAULT_RULES = [ RemoveColumnsWithHighEmptyRate, RemoveColumnsWithSingleValue, ] class BasePipeline(): def __init__(self, actions=[]): self.actions = actions self.rules = DEFAULT_RULES def create_actions(self, df, column_types, statistics): all_suggestions = [] for rule in self.rules: suggestions = rule(df, column_types, statistics).evaluate() if suggestions: all_suggestions += suggestions self.actions = all_suggestions return all_suggestions def transform(self, df): if len(self.actions) == 0: print('Pipeline is empty.') return df df_transformed = df for action in self.actions: df_transformed = BaseAction(action['action_payload']).execute(df_transformed) return df_transformed src/data_cleaner/pipelines/__init__.py METASEP src/data_cleaner/cleaning_rules/unit_conversion.py METASEP 
src/data_cleaner/cleaning_rules/type_conversion.py METASEP src/data_cleaner/cleaning_rules/remove_outliers.py METASEP src/data_cleaner/cleaning_rules/remove_duplicate_rows.py METASEP src/data_cleaner/cleaning_rules/remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithSingleValue(BaseRule): # Check statistic [feature_uuid]/count_distinct def evaluate(self): columns_with_single_value = [] for c in self.df_columns: if f'{c}/count_distinct' not in self.statistics: continue feature_count_distinct = self.statistics[f'{c}/count_distinct'] if feature_count_distinct == 1: columns_with_single_value.append(c) suggestions = [] suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with single value', f'The following columns have single value in all rows: {columns_with_single_value}.'\ ' Suggest to remove them.', ActionType.REMOVE, action_arguments=columns_with_single_value, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithHighEmptyRate(BaseRule): MISSING_RATE_THRESHOLD = 0.8 def evaluate(self): columns_with_missing_values = [] columns_with_no_values = [] for c in self.df_columns: if self.statistics.get(f'{c}/count') == 0: columns_with_no_values.append(c) elif f'{c}/null_value_rate' in self.statistics: null_value_rate = self.statistics[f'{c}/null_value_rate'] if null_value_rate >= self.MISSING_RATE_THRESHOLD: columns_with_missing_values.append(c) suggestions = [] if len(columns_with_no_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with no values', f'The following columns have no values: {columns_with_no_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_no_values, axis=Axis.COLUMN, )) if len(columns_with_missing_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with high empty rate', f'The following columns have high empty rate: {columns_with_missing_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_missing_values, axis=Axis.COLUMN, )) return suggestions src/data_cleaner/cleaning_rules/remove_columns_with_few_unique_values.py METASEP src/data_cleaner/cleaning_rules/remove_collinear_columns.py METASEP src/data_cleaner/cleaning_rules/reformat_values.py METASEP src/data_cleaner/cleaning_rules/impute_values.py METASEP src/data_cleaner/cleaning_rules/fix_encoding.py METASEP src/data_cleaner/cleaning_rules/base.py METASEP class BaseRule: def __init__(self, df, column_types, statistics): self.df = df self.df_columns = df.columns.tolist() self.column_types = column_types self.statistics = statistics def evaluate(self): """Evaluate data cleaning rule and generate suggested actions Returns ------- A list of suggested actions """ return [] def _build_transformer_action_suggestion( self, title, message, action_type, action_arguments=[], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ): return dict( title=title, message=message, action_payload=dict( action_type=action_type, action_arguments=action_arguments, action_code=action_code, action_options=action_options, 
action_variables=action_variables, axis=axis, outputs=outputs, ), ) src/data_cleaner/cleaning_rules/__init__.py METASEP src/data_cleaner/analysis/constants.py METASEP CHART_TYPE_BAR_HORIZONTAL = 'bar_horizontal' CHART_TYPE_LINE_CHART = 'line_chart' CHART_TYPE_HISTOGRAM = 'histogram' LABEL_TYPE_RANGE = 'range' DATA_KEY_CHARTS = 'charts' DATA_KEY_CORRELATION = 'correlations' DATA_KEY_OVERVIEW = 'overview' DATA_KEY_TIME_SERIES = 'time_series' DATA_KEYS = [ DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_OVERVIEW, DATA_KEY_TIME_SERIES, ] src/data_cleaner/analysis/charts.py METASEP from data_cleaner.analysis.constants import ( CHART_TYPE_BAR_HORIZONTAL, CHART_TYPE_LINE_CHART, CHART_TYPE_HISTOGRAM, DATA_KEY_TIME_SERIES, LABEL_TYPE_RANGE, ) from data_cleaner.shared.utils import clean_series from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) import dateutil.parser import math import numpy as np import pandas as pd DD_KEY = 'lambda.analysis_charts' BUCKETS = 40 TIME_SERIES_BUCKETS = 40 def increment(metric, tags={}): pass def build_buckets(min_value, max_value, max_buckets, column_type): diff = max_value - min_value total_interval = 1 + diff bucket_interval = total_interval / max_buckets number_of_buckets = max_buckets is_integer = False parts = str(diff).split('.') if len(parts) == 1: is_integer = True else: is_integer = int(parts[1]) == 0 if NUMBER == column_type and total_interval <= max_buckets and is_integer: number_of_buckets = int(total_interval) bucket_interval = 1 elif bucket_interval > 1: bucket_interval = math.ceil(bucket_interval) else: bucket_interval = round(bucket_interval * 100, 1) / 100 buckets = [] for i in range(number_of_buckets): min_v = min_value + (i * bucket_interval) max_v = min_value + ((i + 1) * bucket_interval) buckets.append(dict( max_value=max_v, min_value=min_v, values=[], )) return buckets, bucket_interval def build_histogram_data(col1, series, column_type): increment(f'{DD_KEY}.build_histogram_data.start', dict(feature_uuid=col1)) max_value = series.max() min_value = series.min() buckets, bucket_interval = build_buckets(min_value, max_value, BUCKETS, column_type) if bucket_interval == 0: return for value in series.values: index = math.floor((value - min_value) / bucket_interval) if value == max_value: index = len(buckets) - 1 buckets[index]['values'].append(value) x = [] y = [] for bucket in buckets: x.append(dict( max=bucket['max_value'], min=bucket['min_value'], )) y.append(dict(value=len(bucket['values']))) increment(f'{DD_KEY}.build_histogram_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_HISTOGRAM, x=x, x_metadata=dict( label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_correlation_data(df, col1, features): increment(f'{DD_KEY}.build_correlation_data.start', dict(feature_uuid=col1)) x = [] y = [] df_copy = df.copy() for feature in features: col2 = feature['uuid'] column_type = feature['column_type'] series = df_copy[col2] df_copy[col2] = clean_series(series, column_type, dropna=False) corr = df_copy.corr() for feature in features: col2 = feature['uuid'] if col1 != col2: value = corr[col1].get(col2, None) if value is not None: x.append(dict(label=col2)) y.append(dict(value=value)) increment(f'{DD_KEY}.build_correlation_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_BAR_HORIZONTAL, x=x, y=y, ) def build_time_series_data(df, feature, datetime_column, column_type): col1 = feature['uuid'] column_type = feature['column_type'] tags = 
dict( column_type=column_type, datetime_column=datetime_column, feature_uuid=col1, ) increment(f'{DD_KEY}.build_time_series_data.start', tags) # print(feature, datetime_column) datetimes = clean_series(df[datetime_column], DATETIME) if datetimes.size <= 1: return min_value_datetime = dateutil.parser.parse(datetimes.min()).timestamp() max_value_datetime = dateutil.parser.parse(datetimes.max()).timestamp() buckets, bucket_interval = build_buckets( min_value_datetime, max_value_datetime, TIME_SERIES_BUCKETS, column_type, ) x = [] y = [] df_copy = df.copy() df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] series = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )][col1] x.append(dict( max=max_value, min=min_value, )) series_cleaned = clean_series(series, column_type, dropna=False) df_value_counts = series_cleaned.value_counts(dropna=False) series_non_null = series_cleaned.dropna() count_unique = len(df_value_counts.index) y_data = dict( count=series_non_null.size, count_distinct=count_unique - 1 if np.nan in df_value_counts else count_unique, null_value_rate=0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, ) if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: y_data.update(dict( average=series_non_null.sum() / len(series_non_null), max=series_non_null.max(), median=series_non_null.quantile(0.5), min=series_non_null.min(), sum=series_non_null.sum(), )) elif column_type in [CATEGORY, CATEGORY_HIGH_CARDINALITY, TRUE_OR_FALSE]: value_counts = series_non_null.value_counts() if len(value_counts.index): value_counts_top = value_counts.sort_values(ascending=False).iloc[:12] mode = value_counts_top.index[0] y_data.update(dict( mode=mode, value_counts=value_counts_top.to_dict(), )) y.append(y_data) increment(f'{DD_KEY}.build_time_series_data.succeeded', tags) return dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_overview_data(df, datetime_features): increment(f'{DD_KEY}.build_overview_data.start') time_series = [] df_copy = df.copy() for feature in datetime_features: column_type = feature['column_type'] datetime_column = feature['uuid'] tags = dict(datetime_column=datetime_column) increment(f'{DD_KEY}.build_overview_time_series.start', tags) if clean_series(df_copy[datetime_column], DATETIME).size <= 1: continue df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) min_value1 = df_copy[datetime_column].min() max_value1 = df_copy[datetime_column].max() buckets, bucket_interval = build_buckets(min_value1, max_value1, TIME_SERIES_BUCKETS, column_type) x = [] y = [] for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] df_filtered = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )] x.append(dict( max=max_value, min=min_value, )) y.append(dict( count=len(df_filtered.index), )) time_series.append(dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, )) increment(f'{DD_KEY}.build_overview_time_series.succeeded', tags) increment(f'{DD_KEY}.build_overview_data.succeeded') return { DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/calculator.py METASEP from data_cleaner.analysis import 
charts from data_cleaner.analysis.constants import ( DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_TIME_SERIES, ) from data_cleaner.shared.utils import clean_series from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.transformer_actions import constants from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) DD_KEY = 'lambda.analysis_calculator' def increment(metric, tags={}): pass class AnalysisCalculator(): def __init__( self, df, column_types, **kwargs, ): self.df = df self.column_types = column_types self.features = [{'uuid': col, 'column_type': column_types.get(col)} for col in df.columns] def process(self, df): increment(f'{DD_KEY}.process.start', self.tags) df_columns = df.columns features_to_use = self.features datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] arr_args_1 = [df for _ in features_to_use], arr_args_2 = features_to_use, data_for_columns = [d for d in run_parallel(self.calculate_column, arr_args_1, arr_args_2)] overview = charts.build_overview_data( df, datetime_features_to_use, ) correlation_overview = [] for d in data_for_columns: corr = d.get(DATA_KEY_CORRELATION) if corr: correlation_overview.append({ 'feature': d['feature'], DATA_KEY_CORRELATION: corr, }) increment(f'{DD_KEY}.process.succeeded', self.tags) return data_for_columns, merge_dict(overview, { DATA_KEY_CORRELATION: correlation_overview, }) @property def features_by_uuid(self): data = {} for feature in self.features: data[feature['uuid']] = feature return data @property def datetime_features(self): return [f for f in self.features if f['column_type'] == DATETIME] @property def tags(self): return dict() def calculate_column(self, df, feature): df_columns = df.columns features_to_use = [f for f in self.features if f['uuid'] in df_columns] datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] col = feature['uuid'] column_type = feature['column_type'] tags = merge_dict(self.tags, dict(column_type=column_type, feature_uuid=col)) increment(f'{DD_KEY}.calculate_column.start', tags) series = df[col] series_cleaned = clean_series(series, column_type) chart_data = [] correlation = [] time_series = [] if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: histogram_data = charts.build_histogram_data(col, series_cleaned, column_type) if histogram_data: chart_data.append(histogram_data) correlation.append(charts.build_correlation_data(df, col, features_to_use)) if column_type in [ CATEGORY, CATEGORY_HIGH_CARDINALITY, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ]: time_series = [] for f in datetime_features_to_use: time_series_chart = charts.build_time_series_data(df, feature, f['uuid'], column_type) if time_series_chart: time_series.append(time_series_chart) increment(f'{DD_KEY}.calculate_column.succeeded', tags) return { 'feature': feature, DATA_KEY_CHARTS: chart_data, DATA_KEY_CORRELATION: correlation, DATA_KEY_TIME_SERIES: time_series, } src/data_cleaner/analysis/__init__.py METASEP src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_single_value.py METASEP from data_cleaner.tests.base_test import TestCase from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue import pandas as pd import numpy as np class RemoveColumnWithSingleValueTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01', True], [2, '2022-01-02', 
            True],
            [3, np.NaN, True],
            [4, np.NaN, True],
            [5, np.NaN, True],
        ], columns=['id', 'deleted_at', 'is_active'])
        column_types = {
            'id': 'number',
            'deleted_at': 'datetime',
            'is_active': 'true_or_false',
        }
        statistics = {
            'id/count_distinct': 5,
            'deleted_at/count_distinct': 2,
            'is_active/count_distinct': 1,
        }
        result = RemoveColumnsWithSingleValue(df, column_types, statistics).evaluate()
        self.assertEqual(result, [
            dict(
                title='Remove columns with single value',
                message=f'The following columns have single value in all rows: [\'is_active\'].'\
                    ' Suggest to remove them.',
                action_payload=dict(
                    action_type='remove',
                    action_arguments=['is_active'],
                    action_code='',
                    action_options={},
                    action_variables={},
                    axis='column',
                    outputs=[],
                ),
            )
        ])

src/data_cleaner/tests/cleaning_rules/test_remove_columns_with_high_empty_rate.py METASEP

from data_cleaner.tests.base_test import TestCase
from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \
    import RemoveColumnsWithHighEmptyRate

import numpy as np
import pandas as pd


class RemoveColumnWithHighMissingRateTests(TestCase):
    def test_evaluate(self):
        df = pd.DataFrame([
            [1, '2022-01-01'],
            [2, np.NaN],
            [3, np.NaN],
            [4, np.NaN],
            [5, np.NaN],
        ], columns=['id', 'deleted_at'])
        column_types = {
            'id': 'number',
            'deleted_at': 'datetime',
        }
        statistics = {
            'id/null_value_rate': 0,
            'deleted_at/null_value_rate': 0.8,
        }
        result = RemoveColumnsWithHighEmptyRate(
            df,
            column_types,
            statistics,
        ).evaluate()
        self.assertEqual(result, [
            dict(
                title='Remove columns with high empty rate',
                message='The following columns have high empty rate: [\'deleted_at\'].'\
                    ' Removing them may increase your data quality.',
                action_payload=dict(
                    action_type='remove',
                    action_arguments=['deleted_at'],
                    action_code='',
                    action_options={},
                    action_variables={},
                    axis='column',
                    outputs=[],
                ),
            )
        ])

src/data_cleaner/tests/cleaning_rules/__init__.py METASEP

src/data_cleaner/data_cleaner.py METASEP

from data_cleaner import column_type_detector
from data_cleaner.analysis.calculator import AnalysisCalculator
from data_cleaner.pipelines.base import BasePipeline
from data_cleaner.shared.hash import merge_dict
from data_cleaner.statistics.calculator import StatisticsCalculator


def clean(df):
    cleaner = DataCleaner()
    return cleaner.clean(df)


class DataCleaner():
    def analyze(self, df):
        """
        Analyze a dataframe
        1. Detect column types
        2. Calculate statistics
        3. Calculate analysis
        """
        column_types = column_type_detector.infer_column_types(df)
        statistics = StatisticsCalculator(column_types).process(df)
        analysis = AnalysisCalculator(df, column_types).process(df)
        return dict(
            analysis=analysis,
            column_types=column_types,
            statistics=statistics,
        )

    def clean(self, df):
        df_stats = self.analyze(df)
        pipeline = BasePipeline()
        suggested_actions = pipeline.create_actions(
            df,
            df_stats['column_types'],
            df_stats['statistics'],
        )
        df_cleaned = pipeline.transform(df)
        return merge_dict(df_stats, dict(
            df_cleaned=df_cleaned,
            suggested_actions=suggested_actions,
        ))

src/data_cleaner/column_type_detector.py METASEP

from data_cleaner.shared.array import subtract

import numpy as np
import re
import warnings

DATETIME_MATCHES_THRESHOLD = 0.5
MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES = 40

CATEGORY = 'category'
CATEGORY_HIGH_CARDINALITY = 'category_high_cardinality'
DATETIME = 'datetime'
EMAIL = 'email'
NUMBER = 'number'
NUMBER_WITH_DECIMALS = 'number_with_decimals'
PHONE_NUMBER = 'phone_number'
TEXT = 'text'
TRUE_OR_FALSE = 'true_or_false'
ZIP_CODE = 'zip_code'

NUMBER_TYPES = [NUMBER, NUMBER_WITH_DECIMALS]
STRING_TYPES = [EMAIL, PHONE_NUMBER, TEXT, ZIP_CODE]

COLUMN_TYPES = [
    CATEGORY,
    CATEGORY_HIGH_CARDINALITY,
    DATETIME,
    EMAIL,
    NUMBER,
    NUMBER_WITH_DECIMALS,
    PHONE_NUMBER,
    TEXT,
    TRUE_OR_FALSE,
    ZIP_CODE,
]

REGEX_DATETIME_PATTERN = r'^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}$|^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}[Tt ]{1}[\d]{1,2}:[\d]{1,2}[:]{0,1}[\d]{1,2}[\.]{0,1}[\d]*|^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$|^\d{1,4}[-\/]{1}\d{1,2}[-\/]{1}\d{1,4}$|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})[\s,]+(\d{2,4})'
REGEX_EMAIL_PATTERN = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
REGEX_EMAIL = re.compile(REGEX_EMAIL_PATTERN)
REGEX_INTEGER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+$'
REGEX_INTEGER = re.compile(REGEX_INTEGER_PATTERN)
REGEX_NUMBER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+\.[0-9]*%{0,1}$|^[\-]{0,1}[\$]{0,1}[0-9,]+%{0,1}$'
REGEX_NUMBER = re.compile(REGEX_NUMBER_PATTERN)
REGEX_PHONE_NUMBER_PATTERN = r'^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. ]*(\d{4})(?: *x(\d+))?\s*$'
REGEX_PHONE_NUMBER = re.compile(REGEX_PHONE_NUMBER_PATTERN)
REGEX_ZIP_CODE_PATTERN = r'^\d{3,5}(?:[-\s]\d{4})?$'
REGEX_ZIP_CODE = re.compile(REGEX_ZIP_CODE_PATTERN)


def infer_column_types(df, **kwargs):
    binary_feature_names = []
    category_feature_names = []
    datetime_feature_names = []
    email_features = []
    float_feature_names = []
    integer_feature_names = []
    non_number_feature_names = []
    phone_number_feature_names = []
    text_feature_names = []
    zip_code_feature_names = []

    for idx, col_type in enumerate(df.dtypes):
        col_name = df.columns[idx]
        if 'datetime64' in str(col_type):
            datetime_feature_names.append(col_name)
        elif col_type == 'object':
            df_sub = df[col_name].copy()
            df_sub = df_sub.replace('^\s+$', np.nan, regex=True)
            df_sub = df_sub.dropna()
            df_sub = df_sub.apply(lambda x: x.strip() if type(x) is str else x)
            if df_sub.empty:
                non_number_feature_names.append(col_name)
            else:
                first_item = df_sub.iloc[0]
                if type(first_item) is list:
                    text_feature_names.append(col_name)
                elif type(first_item) is bool or type(first_item) is np.bool_:
                    if len(df[col_name].unique()) <= 2:
                        binary_feature_names.append(col_name)
                    else:
                        category_feature_names.append(col_name)
                elif len(df[col_name].unique()) <= 2:
                    binary_feature_names.append(col_name)
                else:
                    df_sub = df_sub.astype(str)
                    incorrect_emails = len(
                        df_sub[df_sub.str.contains(REGEX_EMAIL) == False].index,
                    )
                    warnings.filterwarnings('ignore', 'This pattern has match groups')
                    incorrect_phone_numbers = len(
                        df_sub[df_sub.str.contains(REGEX_PHONE_NUMBER) == False].index,
                    )
                    incorrect_zip_codes = len(
                        df_sub[df_sub.str.contains(REGEX_ZIP_CODE) == False].index,
                    )
                    if all(df_sub.str.contains(REGEX_INTEGER)):
                        integer_feature_names.append(col_name)
                    elif all(df_sub.str.contains(REGEX_NUMBER)):
                        float_feature_names.append(col_name)
                    elif incorrect_emails / len(df_sub.index) <= 0.99:
                        email_features.append(col_name)
                    elif incorrect_phone_numbers / len(df_sub.index) <= 0.99:
                        phone_number_feature_names.append(col_name)
                    elif incorrect_zip_codes / len(df_sub.index) <= 0.99:
                        zip_code_feature_names.append(col_name)
                    else:
                        non_number_feature_names.append(col_name)
        elif col_type == 'bool':
            binary_feature_names.append(col_name)
        elif np.issubdtype(col_type, np.floating):
            float_feature_names.append(col_name)
        elif np.issubdtype(col_type, np.integer):
            df_sub = df[col_name].copy()
            df_sub = df_sub.dropna()
            if df_sub.min() >= 100 and df_sub.max() <= 99999 and 'zip' in col_name.lower():
                zip_code_feature_names.append(col_name)
            else:
                integer_feature_names.append(col_name)

    number_feature_names = float_feature_names + integer_feature_names
    binary_feature_names += \
        [col for col in number_feature_names if df[col].nunique(dropna=False) == 2]
    binary_feature_names += \
        [col for col in non_number_feature_names if df[col].nunique(dropna=False) == 2]
    float_feature_names = [col for col in float_feature_names if col not in binary_feature_names]
    integer_feature_names = \
        [col for col in integer_feature_names if col not in binary_feature_names]

    for col_name in subtract(non_number_feature_names, binary_feature_names):
        df_drop_na = df[col_name].dropna()
        if df_drop_na.empty:
            text_feature_names.append(col_name)
        else:
            matches = df_drop_na.astype(str).str.contains(REGEX_DATETIME_PATTERN)
            matches = matches.where(matches == True).dropna()
            if type(df_drop_na.iloc[0]) is list:
                text_feature_names.append(col_name)
            elif len(df_drop_na[matches.index]) / len(df_drop_na) >= DATETIME_MATCHES_THRESHOLD:
                datetime_feature_names.append(col_name)
            elif df_drop_na.nunique() / len(df_drop_na) >= 0.8:
                text_feature_names.append(col_name)
            else:
                word_count, _ = \
                    df[col_name].dropna().map(lambda x: (len(str(x).split(' ')), str(x))).max()
                if word_count > MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES:
                    text_feature_names.append(col_name)
                else:
                    category_feature_names.append(col_name)

    low_cardinality_category_feature_names = \
        [col for col in category_feature_names if df[col].nunique() <= kwargs.get(
            'category_cardinality_threshold',
            255,
        )]
    high_cardinality_category_feature_names = \
        [col for col in category_feature_names
            if col not in low_cardinality_category_feature_names]

    column_types = {}
    array_types_mapping = {
        CATEGORY: low_cardinality_category_feature_names,
        CATEGORY_HIGH_CARDINALITY: high_cardinality_category_feature_names,
        DATETIME: datetime_feature_names,
        EMAIL: email_features,
        NUMBER: integer_feature_names,
        NUMBER_WITH_DECIMALS: float_feature_names,
        PHONE_NUMBER: phone_number_feature_names,
        TEXT: text_feature_names,
        TRUE_OR_FALSE: binary_feature_names,
        ZIP_CODE: zip_code_feature_names,
    }
    for col_type, arr in array_types_mapping.items():
        for col in arr:
            column_types[col] = col_type

    return column_types

src/data_cleaner/__init__.py METASEP

src/data_cleaner/tests/base_test.py METASEP

import unittest


class TestCase(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

src/data_cleaner/tests/__init__.py METASEP

src/data_cleaner/tests/transformer_actions/test_row.py METASEP
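A minimal usage sketch tying the modules above together, assuming the full data_cleaner package (pipelines, statistics, analysis, cleaning_rules) is importable and pandas is installed; the sample DataFrame, its column names, and the print calls below are illustrative only and are not taken from the repository.

import pandas as pd

from data_cleaner import column_type_detector
from data_cleaner.data_cleaner import clean

# Illustrative data only; any pandas DataFrame works here.
df = pd.DataFrame({
    'id': [1, 2, 3, 4, 5],
    'signed_up_at': ['2022-01-01', '2022-01-02', '2022-01-03', '2022-01-04', '2022-01-05'],
    'is_active': [True, True, True, True, True],
})

# Standalone type inference: maps each column name to one of the constants
# defined in column_type_detector (e.g. NUMBER, DATETIME, TRUE_OR_FALSE).
column_types = column_type_detector.infer_column_types(df)
print(column_types)

# Full pipeline: analyze() output merged with suggested cleaning actions and a
# cleaned frame, i.e. a dict with 'column_types', 'statistics', 'analysis',
# 'suggested_actions' and 'df_cleaned' keys.
result = clean(df)
print(result['suggested_actions'])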
[ { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing 
import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, 
True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, 
None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val 
in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n 
self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n assert_frame_equal(df_new, df_expected)\n assert_frame_equal(df_new2, df_expected)\n\n def test_filter_row_not_contains_string(self):\n df = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n action = dict(\n action_code='email not contains mailnet',\n )\n action2 = dict(\n action_code='email not contains \\'mailnet\\'',\n )\n action3 = dict(\n action_code = 'email not contains @',\n )\n action4 = dict(\n action_code = 'email not contains \\'^e+\\w\\'',\n )\n action_invalid = dict(\n action_code='subscription not contains False'\n ) \n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, 
True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n assert_frame_equal(df_new, df_expected)\n assert_frame_equal(df_new2, df_expected)\n\n def test_filter_row_not_contains_string(self):\n df = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n action 
= dict(\n action_code='email not contains mailnet',\n )\n action2 = dict(\n action_code='email not contains \\'mailnet\\'',\n )\n action3 = dict(\n action_code = 'email not contains @',\n )\n action4 = dict(\n action_code = 'email not contains \\'^e+\\w\\'',\n )\n action_invalid = dict(\n action_code='subscription not contains False'\n ) \n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n 
self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n assert_frame_equal(df_new, df_expected)\n assert_frame_equal(df_new2, df_expected)\n\n def test_filter_row_not_contains_string(self):\n df = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n action = dict(\n action_code='email not contains mailnet',\n )\n action2 = dict(\n action_code='email not contains \\'mailnet\\'',\n )\n action3 = dict(\n action_code = 'email not contains @',\n )\n action4 = dict(\n action_code = 'email not contains \\'^e+\\w\\'',\n )\n action_invalid = dict(\n action_code='subscription not contains False'\n ) \n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)\n df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)\n df_expected1 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n df_expected2 = pd.DataFrame([\n [np.NaN, False],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 
'subscription'\n ])\n df_expected3 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False]\n ], columns=[\n 'email',\n 'subscription'\n ])\n assert_frame_equal(df_new, df_expected1)\n assert_frame_equal(df_new2, df_expected1)\n assert_frame_equal(df_new3, df_expected2)\n assert_frame_equal(df_new4, df_expected3)\n\n with self.assertRaises(Exception):\n _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True)\n\n def test_filter_rows_multi_condition(self):\n df = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n action = dict(action_code='(value < 110 and value >= 50) and (value != null)')\n action2 = dict(action_code='brand contains brand and inventory != null')\n action3 = dict(action_code='(brand != null and value > 60) or (discounted == null)')\n action4 = dict(\n action_code='(discounted == True and inventory > 15)'\n ' or (discounted == False and value != null)'\n )\n action5 = dict(\n action_code='(brand not contains company and value == 75 and inventory <= 80)'\n ' or (discounted != null)'\n )\n df_expected = pd.DataFrame(\n [\n [100, None, '', 10],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected2 = pd.DataFrame(\n [\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected3 = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [75, '', '', 80],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected4 = pd.DataFrame(\n [\n [250, 'brand1', False, np.NaN],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected5 = pd.DataFrame(\n [\n [250, 'brand1', False, np.NaN],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)\n df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)\n df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True)\n df_new['value'] = df_new['value'].astype(int)\n df_new['inventory'] = df_new['inventory'].astype(int)\n df_new2['brand'] = df_new2['brand'].astype(str)\n df_new2['inventory'] = df_new2['inventory'].astype(int)\n df_new4['value'] = df_new4['value'].astype(int)\n df_new4['brand'] = df_new4['brand'].astype(str)\n df_new4['discounted'] = df_new4['discounted'].astype(bool)\n assert_frame_equal(df_expected, df_new)\n assert_frame_equal(df_expected2, df_new2)\n assert_frame_equal(df_expected3, df_new3)\n assert_frame_equal(df_expected4, df_new4)\n assert_frame_equal(df_expected5, df_new5)\n\n def test_filter_row_implicit_null(self):\n # tests that implicit null values in the transformed dataframe are still removed\n df = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 
'discounted', 'inventory']\n )\n action_payload = {\n 'action_type': 'filter',\n 'action_code': '%{1} != null',\n 'action_arguments': [],\n 'action_options': {},\n 'axis': 'row',\n 'action_variables': {\n '1': {\n 'id': 'value',\n 'type': 'feature',\n 'feature': {\n 'column_type': 'number',\n 'uuid': 'value'\n } \n },\n },\n 'outputs': []\n }\n action = BaseAction(action_payload)\n df_new = action.execute(df).reset_index(drop=True)\n df_expected = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_new['value'] = df_new['value'].astype(int)\n assert_frame_equal(df_expected, df_new)\n\n def test_original_df_column_name_padding(self):\n # tests edge cases for when columns with the special prefixes \"orig_\" and \"tf_\" are given as input\n df = pd.DataFrame([\n [0,1, None],\n [1,2, np.NaN],\n [np.NaN, 3, 4],\n [3, None, 5]\n ], columns=[\n 'col',\n 'orig_col',\n 'tf_col'\n ])\n df_expected = pd.DataFrame([\n [0,1, None],\n [1,2, np.NaN],\n ], columns=[\n 'col',\n 'orig_col',\n 'tf_col'\n ])\n action = dict(action_code='(col != null) and (orig_col != null)')\n df_new = filter_rows(df, action, original_df = df)\n df_new['col'] = df_new['col'].astype(int)\n df_new['orig_col'] = df_new['orig_col'].astype(int)\n assert_frame_equal(df_new, df_expected)\n\n def test_sort_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[0, 3, 1, 2]]),\n (dict(action_arguments=['integer'], action_options=dict(ascending=False)), df.iloc[[1, 2, 0, 3]]),\n (dict(action_arguments=['string']), df.iloc[[0, 1, 2, 3]]),\n (dict(action_arguments=['string'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(sort_rows(df, action).equals(val))\n\n def test_sort_rows_with_multiple_columns(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer', 'string']), df.iloc[[0, 3, 1, 2]]),\n (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[False, False])), df.iloc[[2, 1, 3, 0]]),\n (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[True, False])), df.iloc[[3, 0, 2, 1]]),\n (dict(action_arguments=['string', 'integer'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(sort_rows(df, action).equals(val))\n\n def test_sort_rows_with_number_and_empty_strings(self):\n df = pd.DataFrame([\n [0],\n [None],\n [3],\n [''],\n [1],\n [2],\n ], columns=[\n 'integer',\n ])\n\n test_cases = [\n (dict(ascending=True), df.iloc[[1, 3, 0, 4, 5, 2]]),\n (dict(ascending=False), df.iloc[[2, 5, 4, 0, 1, 3]]),\n ]\n for action_options, val in test_cases:\n action = dict(\n action_arguments=['integer'],\n action_variables={\n '1': dict(\n feature=dict(\n column_type='number',\n uuid='integer',\n ),\n ),\n },\n action_options=action_options,\n )\n", "type": "inproject" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n 
sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, 
False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n assert_frame_equal(df_new, df_expected)\n assert_frame_equal(df_new2, df_expected)\n\n def test_filter_row_not_contains_string(self):\n df = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n action = dict(\n action_code='email not contains mailnet',\n )\n action2 = dict(\n action_code='email not contains \\'mailnet\\'',\n )\n action3 = dict(\n action_code = 'email not contains @',\n )\n action4 = dict(\n action_code = 'email not contains \\'^e+\\w\\'',\n )\n action_invalid = dict(\n action_code='subscription not contains False'\n ) \n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)\n df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)\n df_expected1 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n df_expected2 = pd.DataFrame([\n [np.NaN, False],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n df_expected3 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False]\n ], columns=[\n 'email',\n 'subscription'\n ])\n assert_frame_equal(df_new, df_expected1)\n assert_frame_equal(df_new2, df_expected1)\n assert_frame_equal(df_new3, df_expected2)\n assert_frame_equal(df_new4, df_expected3)\n\n with self.assertRaises(Exception):\n _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True)\n\n def test_filter_rows_multi_condition(self):\n df = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 'discounted', 
from data_cleaner.tests.base_test import TestCase
from data_cleaner.transformer_actions.base import BaseAction
from data_cleaner.transformer_actions.row import (
    drop_duplicates,
    # explode,
    filter_rows,
    sort_rows,
)
from pandas.util.testing import assert_frame_equal
import numpy as np
import pandas as pd


class RowTests(TestCase):
    def test_drop_duplicates(self):
        df = pd.DataFrame([
            [0, False, 'a'],
            [1, True, 'b'],
            [1, True, 'c'],
            [0, True, 'd'],
        ], columns=[
            'integer',
            'boolean',
            'string',
        ])

        test_cases = [
            (dict(action_arguments=['integer']), df.iloc[[2, 3]]),
            (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),
            (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),
            (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),
            (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),
        ]

        for action, val in test_cases:
            self.assertTrue(drop_duplicates(df, action).equals(val))

    # def test_explode(self):
    #     df = pd.DataFrame([
    #         ['(a, b, c)'],
    #         ['[b, c, d]'],
    #         [' e, f '],
    #     ], columns=['tags'])
    #     action = dict(
    #         action_arguments=['tags'],
    #         action_options={
    #             'separator': ',',
    #         },
    #         outputs=[
    #             dict(
    #                 uuid='tag',
    #                 column_type='text',
    #             ),
    #         ],
    #     )
    #     df_new = explode(df, action)
    #     df_expected = pd.DataFrame([
    #         ['a', '(a, b, c)'],
    #         ['b', '(a, b, c)'],
    #         ['c', '(a, b, c)'],
    #         ['b', '[b, c, d]'],
    #         ['c', '[b, c, d]'],
    #         ['d', '[b, c, d]'],
    #         ['e', ' e, f '],
    #         ['f', ' e, f '],
    #     ], columns=['tag', 'tags'])
    #     assert_frame_equal(df_new.reset_index(drop=True), df_expected)

    def test_filter_rows(self):
        df = pd.DataFrame([
            [0, False, 'a'],
            [1, True, 'b'],
        ], columns=[
            'integer',
            'boolean',
            'string',
        ])

        test_cases = [
            ([0, False, 'a'], 'integer == 0'),
            ([0, False, 'a'], 'string == \'a\''),
            ([1, True, 'b'], 'boolean == True'),
            ([1, True, 'b'], 'integer >= 1'),
            ([1, True, 'b'], 'integer >= 1 and boolean == True'),
            ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \'b\')'),
        ]

        for val, query in test_cases:
            self.assertEqual(
                val,
                filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),
            )

    def test_filter_rows_is_null(self):
        df = pd.DataFrame([
            [None, False, 'a'],
            [2, True, 'b'],
            [3, False, 'c'],
            [1, None, 'a'],
            [2, True, 'b'],
            [3, '', 'c'],
            [1, False, None],
            [2, True, 'b'],
            [3, False, ''],
        ], columns=[
            'integer',
            'boolean',
            'string',
        ])

        integer_rows = filter_rows(
            df,
            dict(action_code='integer == null'),
            original_df=df,
        ).values.tolist()
        self.assertEqual(len(integer_rows), 1)
        self.assertEqual(integer_rows[0][1], False)
        self.assertEqual(integer_rows[0][2], 'a')

        boolean_rows = filter_rows(
            df,
            dict(action_code='boolean == null'),
            original_df=df,
        ).values.tolist()
        self.assertEqual(len(boolean_rows), 2)
        self.assertEqual(boolean_rows[0][0], 1.0)
        self.assertEqual(boolean_rows[0][1], None)
        self.assertEqual(boolean_rows[0][2], 'a')
        self.assertEqual(boolean_rows[1][0], 3.0)
        self.assertEqual(boolean_rows[1][1], '')
        self.assertEqual(boolean_rows[1][2], 'c')

        string_rows = filter_rows(
            df,
            dict(action_code='string == null'),
            original_df=df,
        ).values.tolist()
        self.assertEqual(len(string_rows), 2)
        self.assertEqual(string_rows[0][0], 1.0)
        self.assertEqual(string_rows[0][1], False)
        self.assertEqual(string_rows[0][2], None)
        self.assertEqual(string_rows[1][0], 3.0)
        self.assertEqual(string_rows[1][1], False)
        self.assertEqual(string_rows[1][2], '')

    def test_filter_rows_is_not_null(self):
        df = pd.DataFrame([
            [None, False, 'a'],
            [2, True, 'b'],
            [3, False, 'c'],
            [1, None, 'a'],
            [2, True, 'b'],
            [3, '', 'c'],
            [1, False, None],
            [2, True, 'b'],
            [3, False, ''],
        ], columns=[
            'integer',
            'boolean',
            'string',
        ])
        integer_rows = filter_rows(
            df,
            dict(action_code='integer != null'),
            original_df=df,
        )['integer'].values.tolist()
        self.assertEqual(integer_rows, [
            2,
            3,
            1,
            2,
            3,
            1,
            2,
            3,
        ])

        boolean_rows = filter_rows(
            df,
            dict(action_code='boolean != null'),
            original_df=df,
        )['boolean'].values.tolist()
        self.assertEqual(boolean_rows, [
            False,
            True,
            False,
            True,
            False,
            True,
            False,
        ])

        string_rows = filter_rows(
            df,
            dict(action_code='string != null'),
            original_df=df,
        )['string'].values.tolist()
        self.assertEqual(string_rows, [
            'a',
            'b',
            'c',
            'a',
            'b',
            'c',
            'b',
        ])

    def test_filter_row_contains_string(self):
        df = pd.DataFrame([
            ['fsdijfosidjfiosfj'],
            ['[email protected]'],
            [np.NaN],
            ['fsdfsdfdsfdsf'],
            ['[email protected]'],
        ], columns=[
            'id',
        ])
        action = dict(
            action_code='id contains @',
        )
        action2 = dict(
            action_code='id contains \'@\'',
        )
        df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)
        df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)
        df_expected = pd.DataFrame([
            ['[email protected]'],
            ['[email protected]'],
        ], columns=[
            'id',
        ])
        assert_frame_equal(df_new, df_expected)
        assert_frame_equal(df_new2, df_expected)

    def test_filter_row_not_contains_string(self):
        df = pd.DataFrame([
            [np.NaN, False],
            ['[email protected]', True],
            ['[email protected]', True],
            ['fsdfsdfdsfdsf', False],
            ['[email protected]', False],
            ['eeeeasdf', True],
        ], columns=[
            'email',
            'subscription',
        ])
        action = dict(
            action_code='email not contains mailnet',
        )
        action2 = dict(
            action_code='email not contains \'mailnet\'',
        )
        action3 = dict(
            action_code='email not contains @',
        )
        action4 = dict(
            action_code='email not contains \'^e+\w\'',
        )
        action_invalid = dict(
            action_code='subscription not contains False'
        )
        df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)
        df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)
        df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)
        df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)
        df_expected1 = pd.DataFrame([
            [np.NaN, False],
            ['[email protected]', True],
            ['fsdfsdfdsfdsf', False],
            ['eeeeasdf', True],
        ], columns=[
            'email',
            'subscription',
        ])
        df_expected2 = pd.DataFrame([
            [np.NaN, False],
            ['fsdfsdfdsfdsf', False],
            ['eeeeasdf', True],
        ], columns=[
            'email',
            'subscription',
        ])
        df_expected3 = pd.DataFrame([
            [np.NaN, False],
            ['[email protected]', True],
            ['[email protected]', True],
            ['fsdfsdfdsfdsf', False],
            ['[email protected]', False],
        ], columns=[
            'email',
            'subscription',
        ])
        assert_frame_equal(df_new, df_expected1)
        assert_frame_equal(df_new2, df_expected1)
        assert_frame_equal(df_new3, df_expected2)
        assert_frame_equal(df_new4, df_expected3)

        with self.assertRaises(Exception):
            _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True)

    def test_filter_rows_multi_condition(self):
        df = pd.DataFrame(
            [
                [100, None, '', 10],
                [250, 'brand1', False, np.NaN],
                [np.NaN, 'brand2', None, 18],
                [50, 'brand1', True, 13],
                [75, '', '', 80],
                [None, 'company3', False, 23],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        action = dict(action_code='(value < 110 and value >= 50) and (value != null)')
        action2 = dict(action_code='brand contains brand and inventory != null')
        action3 = dict(action_code='(brand != null and value > 60) or (discounted == null)')
        action4 = dict(
            action_code='(discounted == True and inventory > 15)'
                        ' or (discounted == False and value != null)'
        )
        action5 = dict(
            action_code='(brand not contains company and value == 75 and inventory <= 80)'
                        ' or (discounted != null)'
        )
        df_expected = pd.DataFrame(
            [
                [100, None, '', 10],
                [50, 'brand1', True, 13],
                [75, '', '', 80],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_expected2 = pd.DataFrame(
            [
                [np.NaN, 'brand2', None, 18],
                [50, 'brand1', True, 13],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_expected3 = pd.DataFrame(
            [
                [100, None, '', 10],
                [250, 'brand1', False, np.NaN],
                [np.NaN, 'brand2', None, 18],
                [75, '', '', 80],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_expected4 = pd.DataFrame(
            [
                [250, 'brand1', False, np.NaN],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_expected5 = pd.DataFrame(
            [
                [250, 'brand1', False, np.NaN],
                [50, 'brand1', True, 13],
                [75, '', '', 80],
                [None, 'company3', False, 23],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)
        df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)
        df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)
        df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)
        df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True)
        df_new['value'] = df_new['value'].astype(int)
        df_new['inventory'] = df_new['inventory'].astype(int)
        df_new2['brand'] = df_new2['brand'].astype(str)
        df_new2['inventory'] = df_new2['inventory'].astype(int)
        df_new4['value'] = df_new4['value'].astype(int)
        df_new4['brand'] = df_new4['brand'].astype(str)
        df_new4['discounted'] = df_new4['discounted'].astype(bool)
        assert_frame_equal(df_expected, df_new)
        assert_frame_equal(df_expected2, df_new2)
        assert_frame_equal(df_expected3, df_new3)
        assert_frame_equal(df_expected4, df_new4)
        assert_frame_equal(df_expected5, df_new5)

    def test_filter_row_implicit_null(self):
        # tests that implicit null values in the transformed dataframe are still removed
        df = pd.DataFrame(
            [
                [100, None, '', 10],
                [250, 'brand1', False, np.NaN],
                [np.NaN, 'brand2', None, 18],
                [50, 'brand1', True, 13],
                [75, '', '', 80],
                [None, 'company3', False, 23],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        action_payload = {
            'action_type': 'filter',
            'action_code': '%{1} != null',
            'action_arguments': [],
            'action_options': {},
            'axis': 'row',
            'action_variables': {
                '1': {
                    'id': 'value',
                    'type': 'feature',
                    'feature': {
                        'column_type': 'number',
                        'uuid': 'value'
                    }
                },
            },
            'outputs': []
        }
        action = BaseAction(action_payload)
        df_new = action.execute(df).reset_index(drop=True)
        df_expected = pd.DataFrame(
            [
                [100, None, '', 10],
                [250, 'brand1', False, np.NaN],
                [50, 'brand1', True, 13],
                [75, '', '', 80],
            ],
            columns=['value', 'brand', 'discounted', 'inventory']
        )
        df_new['value'] = df_new['value'].astype(int)
        assert_frame_equal(df_expected, df_new)

    def test_original_df_column_name_padding(self):
        # tests edge cases for when columns with the special prefixes "orig_" and "tf_" are given as input
        df = pd.DataFrame([
            [0, 1, None],
            [1, 2, np.NaN],
            [np.NaN, 3, 4],
            [3, None, 5],
        ], columns=[
            'col',
            'orig_col',
            'tf_col',
        ])
        df_expected = pd.DataFrame([
            [0, 1, None],
            [1, 2, np.NaN],
        ], columns=[
            'col',
            'orig_col',
            'tf_col',
        ])
        action = dict(action_code='(col != null) and (orig_col != null)')
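The simple comparison cases in test_filter_rows use expressions that are already valid pandas query syntax, so one plausible building block for filter_rows is a direct DataFrame.query call. This is a minimal sketch under that assumption, not the project's actual implementation; the null and contains operators exercised elsewhere in the tests would need extra translation first.

import pandas as pd

df = pd.DataFrame([
    [0, False, 'a'],
    [1, True, 'b'],
], columns=['integer', 'boolean', 'string'])

# Each action_code below is handed to pandas unchanged.
for action_code in [
    'integer == 0',
    "integer >= 1 and (boolean == False or string == 'b')",
]:
    # query() evaluates the comparison and boolean operators row-wise.
    print(df.query(action_code))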
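The is-null and is-not-null tests above expect NaN/None and empty strings to be treated the same way. A minimal sketch of that behaviour, assuming a hypothetical helper name rather than the project's real filter_rows code:

import pandas as pd


def drop_null_rows(df: pd.DataFrame, column: str) -> pd.DataFrame:
    """Keep only rows where `column` is non-null, counting '' as null."""
    series = df[column]
    missing = series.isna() | (series.astype(str) == '')
    return df[~missing]


df = pd.DataFrame({
    'integer': [None, 2, 3],
    'string': ['a', '', 'c'],
})
print(drop_null_rows(df, 'integer'))  # rows with integer 2 and 3 remain
print(drop_null_rows(df, 'string'))   # rows with 'a' and 'c' remain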
action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)\n df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)\n df_expected1 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n df_expected2 = pd.DataFrame([\n [np.NaN, False],\n ['fsdfsdfdsfdsf', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n df_expected3 = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False]\n ], columns=[\n 'email',\n 'subscription'\n ])\n assert_frame_equal(df_new, df_expected1)\n assert_frame_equal(df_new2, df_expected1)\n assert_frame_equal(df_new3, df_expected2)\n assert_frame_equal(df_new4, df_expected3)\n\n with self.assertRaises(Exception):\n _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True)\n\n def test_filter_rows_multi_condition(self):\n df = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n action = dict(action_code='(value < 110 and value >= 50) and (value != null)')\n action2 = dict(action_code='brand contains brand and inventory != null')\n action3 = dict(action_code='(brand != null and value > 60) or (discounted == null)')\n action4 = dict(\n action_code='(discounted == True and inventory > 15)'\n ' or (discounted == False and value != null)'\n )\n action5 = dict(\n action_code='(brand not contains company and value == 75 and inventory <= 80)'\n ' or (discounted != null)'\n )\n df_expected = pd.DataFrame(\n [\n [100, None, '', 10],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected2 = pd.DataFrame(\n [\n [np.NaN, 'brand2', None, 18],\n [50, 'brand1', True, 13],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected3 = pd.DataFrame(\n [\n [100, None, '', 10],\n [250, 'brand1', False, np.NaN],\n [np.NaN, 'brand2', None, 18],\n [75, '', '', 80],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected4 = pd.DataFrame(\n [\n [250, 'brand1', False, np.NaN],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_expected5 = pd.DataFrame(\n [\n [250, 'brand1', False, np.NaN],\n [50, 'brand1', True, 13],\n [75, '', '', 80],\n [None, 'company3', False, 23],\n ],\n columns=['value', 'brand', 'discounted', 'inventory']\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)\n df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)\n df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True)\n df_new['value'] = df_new['value'].astype(int)\n df_new['inventory'] = df_new['inventory'].astype(int)\n df_new2['brand'] = df_new2['brand'].astype(str)\n df_new2['inventory'] = df_new2['inventory'].astype(int)\n df_new4['value'] = df_new4['value'].astype(int)\n df_new4['brand'] = df_new4['brand'].astype(str)\n df_new4['discounted'] = df_new4['discounted'].astype(bool)\n assert_frame_equal(df_expected, df_new)\n 
assert_frame_equal(df_expected2, df_new2)\n assert_frame_equal(df_expected3, df_new3)\n assert_frame_equal(df_expected4, df_new4)\n assert_frame_equal(df_expected5, df_new5)\n", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n 
original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n", "type": "non_informative" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, 
dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n 
original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 
1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n assert_frame_equal(df_new, df_expected)\n assert_frame_equal(df_new2, df_expected)\n\n def test_filter_row_not_contains_string(self):\n df = pd.DataFrame([\n [np.NaN, False],\n ['[email protected]', True],\n ['[email protected]', True],\n ['fsdfsdfdsfdsf', False],\n ['[email protected]', False],\n ['eeeeasdf', True]\n ], columns=[\n 'email',\n 'subscription'\n ])\n action = dict(\n action_code='email not contains mailnet',\n )\n action2 = dict(\n action_code='email not contains \\'mailnet\\'',\n )\n action3 = dict(", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n 
(dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)\n self.assertEqual(string_rows[1][2], '')\n\n def test_filter_rows_is_not_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n integer_rows = filter_rows(\n df,\n dict(action_code='integer != null'),\n original_df=df,\n )['integer'].values.tolist()\n self.assertEqual(integer_rows, [\n 2,\n 
3,\n 1,\n 2,\n 3,\n 1,\n 2,\n 3,\n ])\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean != null'),\n original_df=df,\n )['boolean'].values.tolist()\n self.assertEqual(boolean_rows, [\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ])\n\n string_rows = filter_rows(\n df,\n dict(action_code='string != null'),\n original_df=df,\n )['string'].values.tolist()\n self.assertEqual(string_rows, [\n 'a',\n 'b',\n 'c',\n 'a',\n 'b',\n 'c',\n 'b',\n ])\n\n def test_filter_row_contains_string(self):\n df = pd.DataFrame([\n ['fsdijfosidjfiosfj'],\n ['[email protected]'],\n [np.NaN],\n ['fsdfsdfdsfdsf'],\n ['[email protected]'],\n ], columns=[\n 'id',\n ])\n action = dict(\n action_code='id contains @',\n )\n action2 = dict(\n action_code='id contains \\'@\\'',\n )\n df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)\n df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)\n df_expected = pd.DataFrame([\n ['[email protected]'],\n ['[email protected]'],\n ], columns=[\n 'id',", "type": "random" }, { "content": "from data_cleaner.tests.base_test import TestCase\nfrom data_cleaner.transformer_actions.base import BaseAction\nfrom data_cleaner.transformer_actions.row import (\n drop_duplicates,\n # explode,\n filter_rows,\n sort_rows,\n)\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RowTests(TestCase): \n def test_drop_duplicates(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n [1, True, 'c'],\n [0, True, 'd'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n (dict(action_arguments=['integer']), df.iloc[[2, 3]]),\n (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['boolean']), df.iloc[[0, 3]]),\n (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),\n (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 2, 3]]),\n ]\n\n for action, val in test_cases:\n self.assertTrue(drop_duplicates(df, action).equals(val))\n\n # def test_explode(self):\n # df = pd.DataFrame([\n # ['(a, b, c)'],\n # ['[b, c, d]'],\n # [' e, f '],\n # ], columns=['tags'])\n # action = dict(\n # action_arguments=['tags'],\n # action_options={\n # 'separator': ',',\n # },\n # outputs=[\n # dict(\n # uuid='tag',\n # column_type='text',\n # ),\n # ],\n # )\n # df_new = explode(df, action)\n # df_expected = pd.DataFrame([\n # ['a', '(a, b, c)'],\n # ['b', '(a, b, c)'],\n # ['c', '(a, b, c)'],\n # ['b', '[b, c, d]'],\n # ['c', '[b, c, d]'],\n # ['d', '[b, c, d]'],\n # ['e', ' e, f '],\n # ['f', ' e, f '],\n # ], columns=['tag', 'tags'])\n # assert_frame_equal(df_new.reset_index(drop=True), df_expected)\n\n def test_filter_rows(self):\n df = pd.DataFrame([\n [0, False, 'a'],\n [1, True, 'b'],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n test_cases = [\n ([0, False, 'a'], 'integer == 0'),\n ([0, False, 'a'], 'string == \\'a\\''),\n ([1, True, 'b'], 'boolean == True'),\n ([1, True, 'b'], 'integer >= 1'),\n ([1, True, 'b'], 'integer >= 1 and boolean == True'),\n ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \\'b\\')'),\n ]\n\n for val, query in test_cases:\n self.assertEqual(\n val,\n filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),\n )\n\n def test_filter_rows_is_null(self):\n df = pd.DataFrame([\n [None, False, 'a'],\n [2, True, 'b'],\n [3, False, 'c'],\n [1, None, 'a'],\n [2, True, 'b'],\n [3, '', 'c'],\n [1, False, 
None],\n [2, True, 'b'],\n [3, False, ''],\n ], columns=[\n 'integer',\n 'boolean',\n 'string',\n ])\n\n integer_rows = filter_rows(\n df,\n dict(action_code='integer == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(integer_rows), 1)\n self.assertEqual(integer_rows[0][1], False)\n self.assertEqual(integer_rows[0][2], 'a')\n\n boolean_rows = filter_rows(\n df,\n dict(action_code='boolean == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(boolean_rows), 2)\n self.assertEqual(boolean_rows[0][0], 1.0)\n self.assertEqual(boolean_rows[0][1], None)\n self.assertEqual(boolean_rows[0][2], 'a')\n self.assertEqual(boolean_rows[1][0], 3.0)\n self.assertEqual(boolean_rows[1][1], '')\n self.assertEqual(boolean_rows[1][2], 'c')\n\n string_rows = filter_rows(\n df,\n dict(action_code='string == null'),\n original_df=df,\n ).values.tolist()\n self.assertEqual(len(string_rows), 2)\n self.assertEqual(string_rows[0][0], 1.0)\n self.assertEqual(string_rows[0][1], False)\n self.assertEqual(string_rows[0][2], None)\n self.assertEqual(string_rows[1][0], 3.0)\n self.assertEqual(string_rows[1][1], False)", "type": "random" } ]
[ " string_rows = filter_rows(", " df_new = filter_rows(df, action, original_df=df).reset_index(drop=True)", " df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True)", " df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True)", " df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True)", " self.assertTrue(sort_rows(df, action).equals(val))", " action = BaseAction(action_payload)", " integer_rows = filter_rows(", " self.assertTrue(drop_duplicates(df, action).equals(val))", " _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True)", " boolean_rows = filter_rows(", " df_new = filter_rows(df, action, original_df = df)", " filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(),", " df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True)", " df_new = action.execute(df).reset_index(drop=True)", "", " # ], columns=['tags'])", "from data_cleaner.transformer_actions.row import (", " def test_filter_row_implicit_null(self):", " def test_filter_row_contains_string(self):", " [2, True, 'b'],", " 2,", " action_code = 'email not contains @',", " ])", " self.assertEqual(string_rows[1][2], '')" ]
METASEP
20
mage-ai__mage-ai
mage-ai__mage-ai METASEP mage_ai/data_cleaner/transformer_actions/udf/substring.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Substring(BaseUDF): def execute(self): start = self.options.get('start') stop = self.options.get('stop') if start is None and stop is None: raise Exception('Require at least one of `start` and `stop` parameters.') return self.df[self.arguments[0]].str.slice(start=start, stop=stop) mage_ai/data_cleaner/transformer_actions/udf/string_split.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringSplit(BaseUDF): def execute(self): separator = self.options.get('separator') part_index = self.options.get('part_index') if separator is None or part_index is None: raise Exception('Require both `separator` and `part_index` parameters.') return self.df[self.arguments[0]].str.split(separator).str[part_index].str.strip() mage_ai/data_cleaner/transformer_actions/udf/string_replace.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringReplace(BaseUDF): def execute(self): pattern = self.options.get('pattern') replacement = self.options.get('replacement') if not pattern and not replacement: raise Exception(f'Require both `pattern` and `replacement` parameters.') return self.df[self.arguments[0]].str.replace(pattern, replacement) mage_ai/data_cleaner/transformer_actions/udf/multiply.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Multiply(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) * self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] * float(self.options['value']) raise Exception('Require second column or a value to multiply.') mage_ai/data_cleaner/transformer_actions/udf/if_else.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.udf.base import BaseUDF class IfElse(BaseUDF): def execute(self): df_copy = self.df.copy() true_index = query_with_action_code(df_copy, self.code, self.kwargs).index arg1_type = self.options.get('arg1_type', 'value') arg2_type = self.options.get('arg2_type', 'value') arg1 = self.arguments[0] if arg1_type == 'column': arg1 = df_copy[arg1] arg2 = self.arguments[1] if arg2_type == 'column': arg2 = df_copy[arg2] df_copy.loc[true_index, 'result'] = arg1 df_copy['result'] = df_copy['result'].fillna(arg2) return df_copy['result'] mage_ai/data_cleaner/transformer_actions/udf/formatted_date.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class FormattedDate(BaseUDF): def execute(self): return pd.to_datetime( self.df[self.arguments[0]], ).dt.strftime(self.options['format']) mage_ai/data_cleaner/transformer_actions/udf/divide.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Divide(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) / self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] / float(self.options['value']) raise Exception('Require second column or a value to divide.') mage_ai/data_cleaner/transformer_actions/udf/distance_between.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np EARTH_RADIUS = 6371 class DistanceBetween(BaseUDF): def execute(self): def __haversine(lat1, lng1, lat2, 
lng2): lat1, lng1, lat2, lng2 = np.radians([lat1, lng1, lat2, lng2]) a = np.sin((lat2-lat1)/2.0)**2 + \ np.cos(lat1) * np.cos(lat2) * np.sin((lng2-lng1)/2.0)**2 return EARTH_RADIUS * 2 * np.arcsin(np.sqrt(a)) return __haversine( self.df[self.arguments[0]], self.df[self.arguments[1]], self.df[self.arguments[2]], self.df[self.arguments[3]], ) mage_ai/data_cleaner/transformer_actions/udf/difference.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class Difference(BaseUDF): def execute(self): col1 = self.arguments[0] column_type = self.options.get('column_type', self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) > 1: col2 = self.arguments[1] return self.__difference_between_columns( self.df[col1], self.df[col2], column_type=column_type, options=self.options, ) elif self.options.get('value') is not None: return self.__subtract_value( self.df[col1], self.options['value'], column_type=column_type, options=self.options, ) raise Exception('Require second column or a value to minus.') def __difference_between_columns(self, column1, column2, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return (pd.to_datetime(column1, utc=True) - pd.to_datetime(column2, utc=True)).dt.days return column1 - column2 def __subtract_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) - pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column - value mage_ai/data_cleaner/transformer_actions/udf/date_trunc.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class DateTrunc(BaseUDF): def execute(self): date_part = self.options['date_part'] date_column = self.arguments[0] df_copy = self.df.copy() df_copy[date_column] = pd.to_datetime(df_copy[date_column]) if date_part == 'week': return (df_copy[date_column] - df_copy[date_column].dt.weekday * np.timedelta64(1, 'D')).\ dt.strftime('%Y-%m-%d') raise Exception(f'Date part {date_part} is not supported.') mage_ai/data_cleaner/transformer_actions/udf/constant.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Constant(BaseUDF): def execute(self): return self.arguments[0] mage_ai/data_cleaner/transformer_actions/udf/base.py METASEP import importlib class BaseUDF(): def __init__(self, df, arguments=[], code=None, options={}, kwargs={}): self.df = df self.arguments = arguments self.code = code self.options = options self.kwargs = kwargs def execute(self): pass def execute_udf(udf_name, df, arguments, code, options, kwargs): udf_class = getattr( importlib.import_module(f'data_cleaner.transformer_actions.udf.{udf_name}'), udf_name.title().replace('_', ''), ) return udf_class(df, arguments, code, options, kwargs).execute() mage_ai/data_cleaner/transformer_actions/udf/addition.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class Addition(BaseUDF): def execute(self): col1 = self.arguments[0] df_result = self.df[col1] column_type = self.options.get("column_type", self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) == 1 and 'value' not in self.options: raise Exception('Require second column or a value to add.') if 
len(self.arguments) > 1: for col in self.arguments[1:]: df_result = df_result + self.df[col] if self.options.get('value') is not None: df_result = self.__add_value( df_result, self.options['value'], column_type=column_type, options=self.options, ) return df_result def __add_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) + pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column + value mage_ai/data_cleaner/transformer_actions/udf/__init__.py METASEP mage_ai/data_cleaner/transformer_actions/variable_replacer.py METASEP from data_cleaner.transformer_actions.constants import VariableType import re def interpolate(text, key, variable_data): """ text: string to operate on key: key to search within text variable_data: dictionary containing data used to interpolate """ regex_replacement = key if variable_data['type'] == VariableType.FEATURE: regex_replacement = variable_data[VariableType.FEATURE]['uuid'] elif variable_data['type'] == VariableType.FEATURE_SET_VERSION: regex_replacement = \ variable_data[VariableType.FEATURE_SET_VERSION][VariableType.FEATURE_SET]['uuid'] regex_pattern = re.compile( '\%__BRACKETS_START__{}__BRACKETS_END__' .format(key) .replace('__BRACKETS_START__', '\{') .replace('__BRACKETS_END__', '\}') ) return re.sub(regex_pattern, regex_replacement, str(text)) def replace_true_false(action_code): regex_pattern_true = re.compile(' true') regex_pattern_false = re.compile(' false') return re.sub( regex_pattern_true, ' True', re.sub(regex_pattern_false, ' False', action_code), ) mage_ai/data_cleaner/transformer_actions/utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis def columns_to_remove(transformer_actions): arr = filter( lambda x: x['action_type'] == ActionType.REMOVE and x['axis'] == Axis.COLUMN, transformer_actions, ) columns = [] for transformer_action in arr: columns += transformer_action['action_arguments'] return columns mage_ai/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) mage_ai/data_cleaner/transformer_actions/row.py METASEP from data_cleaner.column_type_detector import NUMBER_TYPES from 
data_cleaner.transformer_actions.constants import VariableType from data_cleaner.transformer_actions.action_code import query_with_action_code import pandas as pd def drop_duplicates(df, action, **kwargs): keep = action.get('action_options', {}).get('keep', 'last') action_args = dict(keep=keep) subset_cols = action.get('action_arguments') if subset_cols is not None and len(subset_cols) > 0: action_args['subset'] = subset_cols return df.drop_duplicates(**action_args) def filter_rows(df, action, **kwargs): """ df: Pandas DataFrame action: TransformerAction serialized into a dictionary """ action_code = action['action_code'] return query_with_action_code(df, action_code, kwargs) def sort_rows(df, action, **kwargs): ascending = action.get('action_options', {}).get('ascending', True) ascendings = action.get('action_options', {}).get('ascendings', []) if len(ascendings) > 0: ascending = ascendings[0] feature_by_uuid = {} if action.get('action_variables'): for _, val in action['action_variables'].items(): feature = val.get('feature') if feature: feature_by_uuid[feature['uuid']] = feature na_indexes = None as_types = {} for idx, uuid in enumerate(action['action_arguments']): feature = feature_by_uuid.get(uuid) if feature and feature['column_type'] in NUMBER_TYPES: as_types[uuid] = float if idx == 0: na_indexes = df[(df[uuid].isnull()) | (df[uuid].astype(str).str.len() == 0)].index bad_df = None if na_indexes is not None: bad_df = df.index.isin(na_indexes) index = (df[~bad_df] if bad_df is not None else df).astype(as_types).sort_values( by=action['action_arguments'], ascending=ascendings if len(ascendings) > 0 else ascending, ).index df_final = df.loc[index] if bad_df is not None: if ascending: return pd.concat([ df.iloc[bad_df], df_final, ]) return pd.concat([ df_final, df.iloc[bad_df], ]) return df_final mage_ai/data_cleaner/transformer_actions/helpers.py METASEP from data_cleaner.column_type_detector import NUMBER, NUMBER_WITH_DECIMALS, TEXT from data_cleaner.transformer_actions.constants import ActionType, Operator, VariableType import numpy as np import re DAY_SECONDS = 86400 HOUR_SECONDS = 3600 def convert_col_type(df_col, col_type): if col_type == NUMBER: return df_col.replace(r'^\s*$', 0, regex=True).fillna(0).astype(np.int64) elif col_type == NUMBER_WITH_DECIMALS: return df_col.dropna().astype(float) elif col_type == TEXT: return df_col.dropna().astype(str) return df_col def convert_value_type(feature_uuid, action, value): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break if column_type == NUMBER: value = int(value) elif column_type == NUMBER_WITH_DECIMALS: value = float(value) return value def drop_na(df): return df.replace(r'^\s*$', np.nan, regex=True).dropna() def extract_join_feature_set_version_id(payload): if payload['action_type'] != ActionType.JOIN: return None join_feature_set_version_id = payload['action_arguments'][0] if type(join_feature_set_version_id) == str and \ join_feature_set_version_id.startswith('%{'): join_feature_set_version_id = next( v['id'] for v in payload['action_variables'].values() if v['type'] == VariableType.FEATURE_SET_VERSION ) return join_feature_set_version_id def get_column_type(feature_uuid, action): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: 
column_type = v['feature']['column_type'] break return column_type def get_time_window_str(window_in_seconds): if window_in_seconds is None: return None if window_in_seconds >= DAY_SECONDS: time_window = f'{int(window_in_seconds / DAY_SECONDS)}d' elif window_in_seconds >= HOUR_SECONDS: time_window = f'{int(window_in_seconds / HOUR_SECONDS)}h' else: time_window = f'{window_in_seconds}s' return time_window mage_ai/data_cleaner/transformer_actions/constants.py METASEP class ActionType(): ADD = 'add' AVERAGE = 'average' CLEAN_COLUMN_NAME = 'clean_column_name' COUNT = 'count' COUNT_DISTINCT = 'count_distinct' DIFF = 'diff' DROP_DUPLICATE = 'drop_duplicate' EXPAND_COLUMN = 'expand_column' EXPLODE = 'explode' FILTER = 'filter' FIRST = 'first' GROUP = 'group' IMPUTE = 'impute' JOIN = 'join' LAST = 'last' LIMIT = 'limit' MAX = 'max' MEDIAN = 'median' MIN = 'min' MODE = 'mode' REMOVE = 'remove' SCALE = 'scale' SELECT = 'select' SHIFT_DOWN = 'shift_down' SHIFT_UP = 'shift_up' SORT = 'sort' SUM = 'sum' UNION = 'union' UPDATE_TYPE = 'update_type' UPDATE_VALUE = 'update_value' class Axis(): COLUMN = 'column' ROW = 'row' class VariableType(): FEATURE = 'feature' FEATURE_SET = 'feature_set' FEATURE_SET_VERSION = 'feature_set_version' class Operator(): CONTAINS = 'contains' NOT_CONTAINS = 'not contains' EQUALS = '==' NOT_EQUALS = '!=' GREATER_THAN = '>' GREATER_THAN_OR_EQUALS = '>=' LESS_THAN = '<' LESS_THAN_OR_EQUALS = '<=' mage_ai/data_cleaner/transformer_actions/column.py METASEP from data_cleaner.column_type_detector import REGEX_NUMBER from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.helpers import ( convert_col_type, get_column_type, get_time_window_str, ) from data_cleaner.transformer_actions.udf.base import execute_udf from keyword import iskeyword import pandas as pd import numpy as np import re def add_column(df, action, **kwargs): col = action['outputs'][0]['uuid'] col_type = action['outputs'][0]['column_type'] udf = action['action_options'].get('udf') if udf is None: return df df_copy = df.copy() df_copy[col] = execute_udf( udf, df, action.get('action_arguments'), action.get('action_code'), action.get('action_options'), kwargs, ) df_copy[col] = convert_col_type(df_copy[col], col_type) return df_copy def average(df, action, **kwargs): return __agg(df, action, 'mean') def count(df, action, **kwargs): return __groupby_agg(df, action, 'count') def count_distinct(df, action, **kwargs): return __groupby_agg(df, action, 'nunique') def clean_column_name(df, action, **kwargs): columns = action['action_arguments'] mapping = {} for column in columns: orig_name = column if iskeyword(column): column = f'{column}_' column = column.lower() column = re.sub(r'[\s\t\-\.]', '_', column) column = re.sub(r'[^a-z0-9\_]', '', column) column = REGEX_NUMBER.sub(lambda number: f'number_{number.group(0)}', column) if column == 'true' or column == 'false': column = f'{column}_' if iskeyword(column): # check second time if a keyword appears after removing nonalphanum column = f'{column}_' mapping[orig_name] = column return df.rename(columns=mapping) def diff(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].diff() return df def first(df, action, **kwargs): return __agg(df, action, 'first') def impute(df, action, **kwargs): columns = action['action_arguments'] action_options = action['action_options'] strategy = action_options.get('strategy') value = action_options.get('value') 
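    # Imputation strategies handled below: 'average' and 'median' fill missing
    # entries with the column mean/median, 'column' copies values from the column
    # named by `value`, and otherwise a non-null `value` is used as a constant fill.
    # Empty strings are normalized to NaN first so they count as missing.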
empty_string_pattern = r'^\s*$' df[columns] = df[columns].replace(empty_string_pattern, np.nan, regex=True) if strategy == 'average': df[columns] = df[columns].fillna(df[columns].astype(float).mean(axis=0)) elif strategy == 'median': df[columns] = df[columns].fillna(df[columns].astype(float).median(axis=0)) elif strategy == 'column': replacement_df = pd.DataFrame({col: df[value] for col in columns}) df[columns] = df[columns].fillna(replacement_df) elif value is not None: df[columns] = df[columns].fillna(value) else: raise Exception('Require a valid strategy or value') for col in columns: col_type = get_column_type(col, action) df[col] = convert_col_type(df[col], col_type) return df def max(df, action, **kwargs): return __agg(df, action, 'max') def median(df, action, **kwargs): return __agg(df, action, 'median') def min(df, action, **kwargs): return __agg(df, action, 'min') def remove_column(df, action, **kwargs): cols = action['action_arguments'] original_columns = df.columns drop_columns = [col for col in cols if col in original_columns] return df.drop(columns=drop_columns) def last(df, action, **kwargs): return __agg(df, action, 'last') def select(df, action, **kwargs): return df[action['action_arguments']] def shift_down(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] action_options = action.get('action_options', {}) groupby_columns = action_options.get('groupby_columns') periods = action_options.get('periods', 1) if groupby_columns is not None: df[output_col] = df.groupby(groupby_columns)[action['action_arguments'][0]].shift(periods) else: df[output_col] = df[action['action_arguments'][0]].shift(periods) return df def shift_up(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].shift(-1) return df def sum(df, action, **kwargs): return __agg(df, action, 'sum') def __agg(df, action, agg_method): if action['action_options'].get('groupby_columns'): return __groupby_agg(df, action, agg_method) else: output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].agg(agg_method) return df def __column_mapping(action): return dict(zip(action['action_arguments'], [o['uuid'] for o in action['outputs']])) # Filter by timestamp_feature_a - window <= timestamp_feature_b <= timestamp_feature_a def __filter_df_with_time_window(df, action): action_options = action['action_options'] time_window_keys = ['timestamp_feature_a', 'timestamp_feature_b', 'window'] if all(k in action_options for k in time_window_keys): window_in_seconds = action_options['window'] df_time_diff = \ (pd.to_datetime(df[action_options['timestamp_feature_a']], utc=True) - \ pd.to_datetime(df[action_options['timestamp_feature_b']], utc=True)).dt.total_seconds() if window_in_seconds > 0: df_time_diff_filtered = \ df_time_diff[(df_time_diff <= window_in_seconds) & (df_time_diff >= 0)] else: df_time_diff_filtered = \ df_time_diff[(df_time_diff >= window_in_seconds) & (df_time_diff <= 0)] df_filtered = df.loc[df_time_diff_filtered.index] time_window = get_time_window_str(window_in_seconds) else: df_filtered = df time_window = None return df_filtered, time_window def __groupby_agg(df, action, agg_method): df_filtered, _ = __filter_df_with_time_window(df, action) action_code = action.get('action_code') if action_code is not None and action_code != '': df_filtered = query_with_action_code(df_filtered, action_code, { 'original_df': df_filtered, }) action_options = action['action_options'] df_agg = df_filtered.groupby( 
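        # Group on the configured groupby columns, aggregate the argument columns
        # with `agg_method`, then merge the aggregated values back onto every
        # original row via a left join on the same groupby columns (see below).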
action_options['groupby_columns'], )[action['action_arguments']].agg(agg_method) return df.merge( df_agg.rename(columns=__column_mapping(action)), on=action_options['groupby_columns'], how='left', ) mage_ai/data_cleaner/transformer_actions/base.py METASEP from data_cleaner.transformer_actions import column, row from data_cleaner.transformer_actions.constants import ActionType, Axis, VariableType from data_cleaner.transformer_actions.helpers import drop_na from data_cleaner.transformer_actions.variable_replacer import ( interpolate, replace_true_false, ) # from pipelines.column_type_pipelines import COLUMN_TYPE_PIPELINE_MAPPING import json COLUMN_TYPE_PIPELINE_MAPPING = {} FUNCTION_MAPPING = { Axis.COLUMN: { ActionType.ADD: column.add_column, ActionType.AVERAGE: column.average, ActionType.CLEAN_COLUMN_NAME: column.clean_column_name, ActionType.COUNT: column.count, ActionType.COUNT_DISTINCT: column.count_distinct, ActionType.DIFF: column.diff, # ActionType.EXPAND_COLUMN: column.expand_column, ActionType.FIRST: column.first, ActionType.IMPUTE: column.impute, ActionType.LAST: column.last, ActionType.MAX: column.max, ActionType.MEDIAN: column.median, ActionType.MIN: column.min, ActionType.REMOVE: column.remove_column, ActionType.SELECT: column.select, ActionType.SHIFT_DOWN: column.shift_down, ActionType.SHIFT_UP: column.shift_up, ActionType.SUM: column.sum, }, Axis.ROW: { ActionType.DROP_DUPLICATE: row.drop_duplicates, # ActionType.EXPLODE: row.explode, ActionType.FILTER: row.filter_rows, ActionType.SORT: row.sort_rows, }, } class BaseAction(): def __init__(self, action): self.action = action self.columns_by_type = {} for variable_data in self.action.get('action_variables', {}).values(): if not variable_data: continue feature = variable_data.get(VariableType.FEATURE) if not feature: continue column_type = feature.get('column_type') if not self.columns_by_type.get(column_type): self.columns_by_type[column_type] = [] self.columns_by_type[column_type].append(feature['uuid']) @property def action_type(self): return self.action['action_type'] @property def axis(self): return self.action['axis'] def execute(self, df, **kwargs): self.hydrate_action() self.action['action_code'] = replace_true_false(self.action['action_code']) if df.empty: return df if self.action_type in [ActionType.FILTER, ActionType.ADD]: df_transformed = self.transform(df) else: df_transformed = df if self.action_type == ActionType.GROUP: df_output = self.groupby(df, self.action) elif self.action_type == ActionType.JOIN: df_to_join = kwargs.get('df_to_join') df_output = self.join(df, df_to_join, self.action) else: column_types = {} for column_type, cols in self.columns_by_type.items(): for col in cols: column_types[col] = column_type df_output = FUNCTION_MAPPING[self.axis][self.action_type]( df_transformed, self.action, column_types=column_types, original_df=df, ) if self.action_type == ActionType.FILTER: return df.loc[df_output.index][df_output.columns] elif self.action_type == ActionType.ADD: output_cols = [f['uuid'] for f in self.action['outputs']] df[output_cols] = df_output[output_cols] return df else: return df_output def groupby(self, df, action): def __transform_partition(pdf, actions): for action in actions: pdf = BaseAction(action).execute(pdf) return pdf groupby_columns = action['action_arguments'] return df.groupby(groupby_columns).apply(lambda x: __transform_partition(x, action['child_actions'])) def hydrate_action(self): for k, v in self.action['action_variables'].items(): """ k: 1, 1_1 v: { 'type': 'feature', 'id': 1, 
'feature': { 'uuid': 'mage', }, } """ if not v: continue if self.action.get('action_code'): self.action['action_code'] = interpolate(self.action['action_code'], k, v) if self.action.get('action_arguments'): self.action['action_arguments'] = [interpolate( args_text, k, v, ) for args_text in self.action['action_arguments']] if self.action.get('action_options'): action_options_json = json.dumps(self.action['action_options']) self.action['action_options'] = json.loads(interpolate(action_options_json, k, v)) def join(self, df, df_to_join, action): action_options = action['action_options'] left_on = action_options['left_on'] right_on = action_options['right_on'] for i in range(len(left_on)): col1, col2 = left_on[i], right_on[i] if df[col1].dtype != df_to_join[col2].dtype: df[col1] = drop_na(df[col1]).astype(str) df_to_join[col2] = drop_na(df_to_join[col2]).astype(str) if action.get('outputs') is not None: feature_rename_mapping = { f['source_feature']['uuid']:f['uuid'] for f in action['outputs'] if f.get('source_feature') is not None } df_to_join_renamed = df_to_join.rename(columns=feature_rename_mapping) right_on = [feature_rename_mapping.get(key, key) for key in right_on] else: df_to_join_renamed = df_to_join how = action_options.get('how', 'left') df_merged = df.merge(df_to_join_renamed, left_on=left_on, right_on=right_on, how=how) drop_columns = action_options.get('drop_columns', []) rename_columns = action_options.get('rename_columns', {}) return df_merged.drop(columns=drop_columns).rename(columns=rename_columns) def transform(self, df): df_copy = df.copy() current_columns = df_copy.columns for column_type, original_columns in self.columns_by_type.items(): cols = [col for col in original_columns if col in current_columns] if len(cols) == 0: continue build_pipeline = COLUMN_TYPE_PIPELINE_MAPPING.get(column_type) if not build_pipeline: continue df_copy[cols] = build_pipeline().fit_transform(df_copy[cols]) return df_copy mage_ai/data_cleaner/transformer_actions/action_code.py METASEP from data_cleaner.transformer_actions.constants import Operator import re ACTION_CODE_CONDITION_PATTERN = re.compile( r'([^\s()]+) ([!=<>]+|(?:contains)|(?:not contains)) ([^\s()]+)' ) ORIGINAL_COLUMN_PREFIX = 'orig_' TRANSFORMED_COLUMN_PREFIX = 'tf_' def __query_mutate_null_type(match, dtype): condition = [''] column_name, operator, _ = match.groups() column_name = f'{ORIGINAL_COLUMN_PREFIX}{column_name}' if operator == '==': condition.append(f'({column_name}.isna()') if dtype == bool: condition.append(f' | {column_name} == \'\'') elif dtype == str: condition.append(f' | {column_name}.str.len() == 0') condition.append(f')') else: condition.append(f'({column_name}.notna()') if dtype == bool: condition.append(f' & {column_name} != \'\'') elif dtype == str: condition.append(f' & {column_name}.str.len() >= 1') condition.append(f')') return ''.join(condition) def __query_mutate_contains_op(match): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' value = value.strip('\'').strip('\"') if operator == Operator.CONTAINS: condition = f'({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' else: condition = f'~({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' return condition def __query_mutate_default_case(match, column_set): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' if value in column_set: # if comparison is with another column, prefix value with column identifier value = 
f'{TRANSFORMED_COLUMN_PREFIX}{value}' return f'{column_name} {operator} {value}' def __get_column_type(df, cache, column_name): dtype = cache.get(column_name, None) if dtype is None: dropped_na = df[column_name].dropna() dropped_na = dropped_na[~dropped_na.isin([''])] dtype = type(dropped_na.iloc[0]) if len(dropped_na.index) >= 1 else object cache[column_name] = dtype return dtype def query_with_action_code(df, action_code, kwargs): transformed_types, original_types = {}, {} original_df, original_merged = kwargs.get('original_df', None), False reconstructed_code = [] queried_df = df.copy().add_prefix(TRANSFORMED_COLUMN_PREFIX) column_set = set(df.columns) prev_end = 0 for match in ACTION_CODE_CONDITION_PATTERN.finditer(action_code): column_name, operator, value = match.groups() reconstructed_code.append(action_code[prev_end: match.start()]) prev_end = match.end() if operator == Operator.CONTAINS or operator == Operator.NOT_CONTAINS: transformed_dtype = __get_column_type(df, transformed_types, column_name) if transformed_dtype != str: raise TypeError( f'\'{operator}\' can only be used on string columns, {transformed_dtype}' ) reconstructed_code.append(__query_mutate_contains_op(match)) elif (operator == Operator.EQUALS or operator == Operator.NOT_EQUALS) and value == 'null': if original_df is None: raise Exception( 'Null value queries require original dataframe as keyword argument' ) elif not original_merged: queried_df = queried_df.join(original_df.add_prefix(ORIGINAL_COLUMN_PREFIX)) original_merged = True original_dtype = __get_column_type(original_df, original_types, column_name) reconstructed_code.append(__query_mutate_null_type(match, original_dtype)) else: reconstructed_code.append(__query_mutate_default_case(match, column_set)) reconstructed_code.append(action_code[prev_end:]) action_code = ''.join(reconstructed_code) queried_df = queried_df.query(action_code).rename( lambda x: x[len(TRANSFORMED_COLUMN_PREFIX):], axis='columns' ) return queried_df[df.columns] mage_ai/data_cleaner/transformer_actions/__init__.py METASEP mage_ai/data_cleaner/tests/__init__.py METASEP mage_ai/data_cleaner/statistics/calculator.py METASEP from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.column_type_detector import ( DATETIME, NUMBER, NUMBER_TYPES, NUMBER_WITH_DECIMALS, get_mismatched_row_count, ) import math import numpy as np import pandas as pd import traceback VALUE_COUNT_LIMIT = 255 def increment(metric, tags): pass class timer(object): """ with timer('metric.metric', tags={ 'key': 'value' }): function() """ def __init__(self, metric, tags={}): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class StatisticsCalculator(): def __init__( self, # s3_client, # object_key_prefix, # feature_set_version, column_types, **kwargs, ): self.column_types = column_types @property def data_tags(self): return dict() def process(self, df): return self.calculate_statistics_overview(df) def calculate_statistics_overview(self, df): increment( 'lambda.transformer_actions.calculate_statistics_overview.start', self.data_tags, ) with timer( 'lambda.transformer_actions.calculate_statistics_overview.time', self.data_tags): data = dict(count=len(df.index)) arr_args_1 = [df[col] for col in df.columns], arr_args_2 = [col for col in df.columns], dicts = run_parallel(self.statistics_overview, arr_args_1, arr_args_2) for d in dicts: data.update(d) # object_key = s3_paths.path_statistics_overview(self.object_key_prefix) # 
s3_data.upload_json_sorted(self.s3_client, object_key, data) increment( 'lambda.transformer_actions.calculate_statistics_overview.success', self.data_tags, ) return data def statistics_overview(self, series, col): try: return self.__statistics_overview(series, col) except Exception as err: increment( 'lambda.transformer_actions.calculate_statistics_overview.column.failed', merge_dict(self.data_tags, { 'col': col, 'error': err.__class__.__name__, }), ) traceback.print_exc() return {} def __statistics_overview(self, series, col): # The following regex based replace has high overheads # series = series.replace(r'^\s*$', np.nan, regex=True) series_cleaned = series.map(lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan) df_value_counts = series_cleaned.value_counts(dropna=False) df = df_value_counts.reset_index() df.columns = [col, 'count'] df_top_value_counts = df if df.shape[0] > VALUE_COUNT_LIMIT: df_top_value_counts = df.head(VALUE_COUNT_LIMIT) # TODO: remove duplicate data for distinct values # object_key_distinct_values = s3_paths.path_distinct_values_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_distinct_values, columns=[col]) # object_key_statistics = s3_paths.path_statistics_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_statistics) # features = self.feature_set_version['features'] # feature = find(lambda x: x['uuid'] == col, features) # if feature and feature.get('transformed'): # return {} column_type = self.column_types.get(col) series_non_null = series_cleaned.dropna() if column_type == NUMBER: series_non_null = series_non_null.astype(float).astype(int) elif column_type == NUMBER_WITH_DECIMALS: series_non_null = series_non_null.astype(float) count_unique = len(df_value_counts.index) data = { f'{col}/count': series_non_null.size, f'{col}/count_distinct': count_unique - 1 if np.nan in df_value_counts else count_unique, f'{col}/null_value_rate': 0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, f'{col}/null_value_count': series_cleaned.isnull().sum(), } if len(series_non_null) == 0: return data dates = None if column_type in NUMBER_TYPES: data[f'{col}/average'] = series_non_null.sum() / len(series_non_null) data[f'{col}/max'] = series_non_null.max() data[f'{col}/median'] = series_non_null.quantile(0.5) data[f'{col}/min'] = series_non_null.min() data[f'{col}/sum'] = series_non_null.sum() elif column_type == DATETIME: dates = pd.to_datetime(series_non_null, utc=True, errors='coerce').dropna() data[f'{col}/max'] = dates.max().isoformat() data[f'{col}/median'] = dates.sort_values().iloc[math.floor(len(dates) / 2)].isoformat() data[f'{col}/min'] = dates.min().isoformat() if column_type not in NUMBER_TYPES: if dates is not None: value_counts = dates.value_counts() else: value_counts = series_non_null.value_counts() mode = value_counts.index[0] if column_type == DATETIME: mode = mode.isoformat() data[f'{col}/mode'] = mode # Detect mismatched formats for some column types data[f'{col}/mismatched_count'] = get_mismatched_row_count(series, column_type) return data mage_ai/data_cleaner/statistics/__init__.py METASEP mage_ai/data_cleaner/shared/utils.py METASEP from data_cleaner.column_type_detector import ( NUMBER, NUMBER_WITH_DECIMALS, ) import numpy as np def clean_series(series, column_type, dropna=True): series_cleaned = series.map( lambda x: x if (not isinstance(x, str) or (len(x) > 0 
and not x.isspace())) else np.nan, ) if dropna: series_cleaned = series_cleaned.dropna() if column_type == NUMBER: try: series_cleaned = series_cleaned.astype(float).astype(int) except ValueError: series_cleaned = series_cleaned.astype(float) elif column_type == NUMBER_WITH_DECIMALS: series_cleaned = series_cleaned.astype(float) return series_cleaned mage_ai/data_cleaner/shared/multi.py METASEP from concurrent.futures import ThreadPoolExecutor from threading import Thread MAX_WORKERS = 16 def start_thread(target, **kwargs): thread = Thread( target=target, kwargs=kwargs, ) thread.start() return thread def parallelize(func, arr): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, arr) def parallelize_multiple_args(func, arr_args): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *zip(*arr_args)) def run_parallel_threads(list_of_funcs_and_args_or_kwargs): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: for func, args in list_of_funcs_and_args_or_kwargs: pool.submit(func, *args) def run_parallel(func, arr_args_1, arr_args_2): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *arr_args_1, *arr_args_2) mage_ai/data_cleaner/shared/hash.py METASEP from functools import reduce import math import re def dig(obj_arg, arr_or_string): if type(arr_or_string) is str: arr_or_string = arr_or_string.split('.') arr = list(map(str.strip, arr_or_string)) def _build(obj, key): tup = re.split(r'\[(\d+)\]$', key) if len(tup) >= 2: key, index = filter(lambda x: x, tup) if key and index: return obj[key][int(index)] elif index: return obj[int(index)] elif obj: return obj.get(key) else: return obj return reduce(_build, arr, obj_arg) def flatten(input_data): final_data = {} for k1, v1 in input_data.items(): if type(v1) is dict: for k2, v2 in v1.items(): if type(v2) is dict: for k3, v3 in v2.items(): final_data[f'{k1}_{k2}_{k3}'] = v3 else: final_data[f'{k1}_{k2}'] = v2 else: final_data[k1] = v1 return final_data def ignore_keys(d, keys): d_keys = d.keys() d2 = d.copy() for key in keys: if key in d_keys: d2.pop(key) return d2 def ignore_keys_with_blank_values(d): d2 = d.copy() for key, value in d.items(): if not value: d2.pop(key) return d2 def extract(d, keys): def _build(obj, key): val = d.get(key, None) if val is not None: obj[key] = val return obj return reduce(_build, keys, {}) def extract_arrays(input_data): arr = [] for k, v in input_data.items(): if type(v) is list: arr.append(v) return arr def group_by(func, arr): def _build(obj, item): val = func(item) if not obj.get(val): obj[val] = [] obj[val].append(item) return obj return reduce(_build, arr, {}) def index_by(func, arr): obj = {} for item in arr: key = func(item) obj[key] = item return obj def merge_dict(a, b): c = a.copy() c.update(b) return c def replace_dict_nan_value(d): def _replace_nan_value(v): if type(v) == float and math.isnan(v): return None return v return {k: _replace_nan_value(v) for k, v in d.items()} mage_ai/data_cleaner/shared/array.py METASEP import random def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def difference(li1, li2): li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2] return li_dif def flatten(arr): return [item for sublist in arr for item in sublist] def find(condition, arr, map=None): try: return next(map(x) if map else x for x in arr if condition(x)) except StopIteration: return None def sample(arr): return arr[random.randrange(0, len(arr))] def 
subtract(arr1, arr2): return [i for i in arr1 if i not in arr2] mage_ai/data_cleaner/shared/__init__.py METASEP mage_ai/data_cleaner/pipelines/base.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from data_cleaner.cleaning_rules.remove_duplicate_rows \ import RemoveDuplicateRows from data_cleaner.transformer_actions.base import BaseAction DEFAULT_RULES = [ RemoveColumnsWithHighEmptyRate, RemoveColumnsWithSingleValue, RemoveDuplicateRows, ] class BasePipeline(): def __init__(self, actions=[]): self.actions = actions self.rules = DEFAULT_RULES def create_actions(self, df, column_types, statistics): all_suggestions = [] for rule in self.rules: suggestions = rule(df, column_types, statistics).evaluate() if suggestions: all_suggestions += suggestions self.actions = all_suggestions return all_suggestions def transform(self, df): if len(self.actions) == 0: print('Pipeline is empty.') return df df_transformed = df for action in self.actions: df_transformed = BaseAction(action['action_payload']).execute(df_transformed) return df_transformed mage_ai/data_cleaner/pipelines/__init__.py METASEP mage_ai/data_cleaner/cleaning_rules/unit_conversion.py METASEP mage_ai/data_cleaner/cleaning_rules/type_conversion.py METASEP mage_ai/data_cleaner/cleaning_rules/remove_outliers.py METASEP mage_ai/data_cleaner/cleaning_rules/remove_duplicate_rows.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveDuplicateRows(BaseRule): def evaluate(self): df_dedupe = self.df.drop_duplicates() duplicate_row_count = self.df.shape[0] - df_dedupe.shape[0] suggestions = [] if duplicate_row_count > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove duplicate rows', f'There\'re {duplicate_row_count} duplicate rows in the dataset. 
'\ 'Suggest to remove them.', ActionType.DROP_DUPLICATE, action_arguments=[], axis=Axis.ROW, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithSingleValue(BaseRule): # Check statistic [feature_uuid]/count_distinct def evaluate(self): columns_with_single_value = [] for c in self.df_columns: if f'{c}/count_distinct' not in self.statistics: continue feature_count_distinct = self.statistics[f'{c}/count_distinct'] if feature_count_distinct == 1: columns_with_single_value.append(c) suggestions = [] suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with single value', f'The following columns have single value in all rows: {columns_with_single_value}.'\ ' Suggest to remove them.', ActionType.REMOVE, action_arguments=columns_with_single_value, axis=Axis.COLUMN, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithHighEmptyRate(BaseRule): MISSING_RATE_THRESHOLD = 0.8 def evaluate(self): columns_with_missing_values = [] columns_with_no_values = [] for c in self.df_columns: if self.statistics.get(f'{c}/count') == 0: columns_with_no_values.append(c) elif f'{c}/null_value_rate' in self.statistics: null_value_rate = self.statistics[f'{c}/null_value_rate'] if null_value_rate >= self.MISSING_RATE_THRESHOLD: columns_with_missing_values.append(c) suggestions = [] if len(columns_with_no_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with no values', f'The following columns have no values: {columns_with_no_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_no_values, axis=Axis.COLUMN, )) if len(columns_with_missing_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with high empty rate', f'The following columns have high empty rate: {columns_with_missing_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_missing_values, axis=Axis.COLUMN, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_collinear_columns.py METASEP mage_ai/data_cleaner/cleaning_rules/reformat_values.py METASEP mage_ai/data_cleaner/cleaning_rules/impute_values.py METASEP mage_ai/data_cleaner/cleaning_rules/fix_encoding.py METASEP mage_ai/data_cleaner/cleaning_rules/clean_column_names.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.column_type_detector import REGEX_NUMBER from data_cleaner.transformer_actions.constants import ActionType from keyword import iskeyword import re class CleanColumnNames(BaseRule): INVALID_COLUMN_CHARS = re.compile(r'([^a-z\_0-9])') UPPERCASE_PATTERN = re.compile(r'[A-Z]') def evaluate(self): """ Rule: 1. If column name contains an invalid character, suggest cleaning (remove all characters) 2. If column name is a reserved python keyword, suggest cleaning (pad with symbols) 3. If column is of mixedcase, suggest cleaning (convert to lowercase) 4. If column contains only numbers, suggest cleaning (pad with letters) 5. 
If column contains dashes, convert to underscore """ matches = [] for column in self.df_columns: if self.INVALID_COLUMN_CHARS.search(column) != None: matches.append(column) elif REGEX_NUMBER.search(column) != None: matches.append(column) else: column = column.lower().strip() if column == 'true' or column == 'false': matches.append(column) elif iskeyword(column): matches.append(column) suggestions = [] if len(matches) != 0: suggestions.append(self._build_transformer_action_suggestion( 'Clean dirty column names', 'The following columns have unclean naming conventions: ' f'{matches}. ' 'Making these names lowercase and alphanumeric may improve' 'ease of dataset access and reduce security risks.', action_type=ActionType.CLEAN_COLUMN_NAME, action_arguments=matches, axis='column' )) return suggestions mage_ai/data_cleaner/cleaning_rules/base.py METASEP class BaseRule: def __init__(self, df, column_types, statistics): self.df = df self.df_columns = df.columns.tolist() self.column_types = column_types self.statistics = statistics def evaluate(self): """Evaluate data cleaning rule and generate suggested actions Returns ------- A list of suggested actions """ return [] def _build_transformer_action_suggestion( self, title, message, action_type, action_arguments=[], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ): return dict( title=title, message=message, action_payload=dict( action_type=action_type, action_arguments=action_arguments, action_code=action_code, action_options=action_options, action_variables=action_variables, axis=axis, outputs=outputs, ), ) mage_ai/data_cleaner/cleaning_rules/__init__.py METASEP mage_ai/data_cleaner/analysis/constants.py METASEP CHART_TYPE_BAR_HORIZONTAL = 'bar_horizontal' CHART_TYPE_LINE_CHART = 'line_chart' CHART_TYPE_HISTOGRAM = 'histogram' LABEL_TYPE_RANGE = 'range' DATA_KEY_CHARTS = 'charts' DATA_KEY_CORRELATION = 'correlations' DATA_KEY_OVERVIEW = 'overview' DATA_KEY_TIME_SERIES = 'time_series' DATA_KEYS = [ DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_OVERVIEW, DATA_KEY_TIME_SERIES, ] mage_ai/data_cleaner/analysis/charts.py METASEP from data_cleaner.analysis.constants import ( CHART_TYPE_BAR_HORIZONTAL, CHART_TYPE_LINE_CHART, CHART_TYPE_HISTOGRAM, DATA_KEY_TIME_SERIES, LABEL_TYPE_RANGE, ) from data_cleaner.shared.utils import clean_series from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) import dateutil.parser import math import numpy as np import pandas as pd DD_KEY = 'lambda.analysis_charts' BUCKETS = 40 TIME_SERIES_BUCKETS = 40 def increment(metric, tags={}): pass def build_buckets(min_value, max_value, max_buckets, column_type): diff = max_value - min_value total_interval = 1 + diff bucket_interval = total_interval / max_buckets number_of_buckets = max_buckets is_integer = False parts = str(diff).split('.') if len(parts) == 1: is_integer = True else: is_integer = int(parts[1]) == 0 if NUMBER == column_type and total_interval <= max_buckets and is_integer: number_of_buckets = int(total_interval) bucket_interval = 1 elif bucket_interval > 1: bucket_interval = math.ceil(bucket_interval) else: bucket_interval = round(bucket_interval * 100, 1) / 100 buckets = [] for i in range(number_of_buckets): min_v = min_value + (i * bucket_interval) max_v = min_value + ((i + 1) * bucket_interval) buckets.append(dict( max_value=max_v, min_value=min_v, values=[], )) return buckets, bucket_interval def build_histogram_data(col1, series, 
column_type): increment(f'{DD_KEY}.build_histogram_data.start', dict(feature_uuid=col1)) max_value = series.max() min_value = series.min() buckets, bucket_interval = build_buckets(min_value, max_value, BUCKETS, column_type) if bucket_interval == 0: return for value in series.values: index = math.floor((value - min_value) / bucket_interval) if value == max_value: index = len(buckets) - 1 buckets[index]['values'].append(value) x = [] y = [] for bucket in buckets: x.append(dict( max=bucket['max_value'], min=bucket['min_value'], )) y.append(dict(value=len(bucket['values']))) increment(f'{DD_KEY}.build_histogram_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_HISTOGRAM, x=x, x_metadata=dict( label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_correlation_data(df, col1, features): increment(f'{DD_KEY}.build_correlation_data.start', dict(feature_uuid=col1)) x = [] y = [] df_copy = df.copy() for feature in features: col2 = feature['uuid'] column_type = feature['column_type'] series = df_copy[col2] df_copy[col2] = clean_series(series, column_type, dropna=False) corr = df_copy.corr() for feature in features: col2 = feature['uuid'] if col1 != col2: value = corr[col1].get(col2, None) if value is not None: x.append(dict(label=col2)) y.append(dict(value=value)) increment(f'{DD_KEY}.build_correlation_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_BAR_HORIZONTAL, x=x, y=y, ) def build_time_series_data(df, feature, datetime_column, column_type): col1 = feature['uuid'] column_type = feature['column_type'] tags = dict( column_type=column_type, datetime_column=datetime_column, feature_uuid=col1, ) increment(f'{DD_KEY}.build_time_series_data.start', tags) # print(feature, datetime_column) datetimes = clean_series(df[datetime_column], DATETIME) if datetimes.size <= 1: return min_value_datetime = dateutil.parser.parse(datetimes.min()).timestamp() max_value_datetime = dateutil.parser.parse(datetimes.max()).timestamp() buckets, bucket_interval = build_buckets( min_value_datetime, max_value_datetime, TIME_SERIES_BUCKETS, column_type, ) x = [] y = [] df_copy = df.copy() df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] series = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )][col1] x.append(dict( max=max_value, min=min_value, )) series_cleaned = clean_series(series, column_type, dropna=False) df_value_counts = series_cleaned.value_counts(dropna=False) series_non_null = series_cleaned.dropna() count_unique = len(df_value_counts.index) y_data = dict( count=series_non_null.size, count_distinct=count_unique - 1 if np.nan in df_value_counts else count_unique, null_value_rate=0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, ) if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: y_data.update(dict( average=series_non_null.sum() / len(series_non_null), max=series_non_null.max(), median=series_non_null.quantile(0.5), min=series_non_null.min(), sum=series_non_null.sum(), )) elif column_type in [CATEGORY, CATEGORY_HIGH_CARDINALITY, TRUE_OR_FALSE]: value_counts = series_non_null.value_counts() if len(value_counts.index): value_counts_top = value_counts.sort_values(ascending=False).iloc[:12] mode = value_counts_top.index[0] y_data.update(dict( mode=mode, value_counts=value_counts_top.to_dict(), )) y.append(y_data) 
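# Each time bucket above contributes one x entry (its min/max timestamp range) and
# one y entry with count, count_distinct and null_value_rate, plus
# average/max/median/min/sum for number columns or mode/value_counts (top 12) for
# category and true_or_false columns.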
increment(f'{DD_KEY}.build_time_series_data.succeeded', tags) return dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_overview_data(df, datetime_features): increment(f'{DD_KEY}.build_overview_data.start') time_series = [] df_copy = df.copy() for feature in datetime_features: column_type = feature['column_type'] datetime_column = feature['uuid'] tags = dict(datetime_column=datetime_column) increment(f'{DD_KEY}.build_overview_time_series.start', tags) if clean_series(df_copy[datetime_column], DATETIME).size <= 1: continue df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) min_value1 = df_copy[datetime_column].min() max_value1 = df_copy[datetime_column].max() buckets, bucket_interval = build_buckets(min_value1, max_value1, TIME_SERIES_BUCKETS, column_type) x = [] y = [] for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] df_filtered = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )] x.append(dict( max=max_value, min=min_value, )) y.append(dict( count=len(df_filtered.index), )) time_series.append(dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, )) increment(f'{DD_KEY}.build_overview_time_series.succeeded', tags) increment(f'{DD_KEY}.build_overview_data.succeeded') return { DATA_KEY_TIME_SERIES: time_series, } mage_ai/data_cleaner/analysis/calculator.py METASEP from data_cleaner.analysis import charts from data_cleaner.analysis.constants import ( DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_TIME_SERIES, ) from data_cleaner.shared.utils import clean_series from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.transformer_actions import constants from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) DD_KEY = 'lambda.analysis_calculator' def increment(metric, tags={}): pass class AnalysisCalculator(): def __init__( self, df, column_types, **kwargs, ): self.df = df self.column_types = column_types self.features = [{'uuid': col, 'column_type': column_types.get(col)} for col in df.columns] def process(self, df): increment(f'{DD_KEY}.process.start', self.tags) df_columns = df.columns features_to_use = self.features datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] arr_args_1 = [df for _ in features_to_use], arr_args_2 = features_to_use, data_for_columns = [d for d in run_parallel(self.calculate_column, arr_args_1, arr_args_2)] overview = charts.build_overview_data( df, datetime_features_to_use, ) correlation_overview = [] for d in data_for_columns: corr = d.get(DATA_KEY_CORRELATION) if corr: correlation_overview.append({ 'feature': d['feature'], DATA_KEY_CORRELATION: corr, }) increment(f'{DD_KEY}.process.succeeded', self.tags) return data_for_columns, merge_dict(overview, { DATA_KEY_CORRELATION: correlation_overview, }) @property def features_by_uuid(self): data = {} for feature in self.features: data[feature['uuid']] = feature return data @property def datetime_features(self): return [f for f in self.features if f['column_type'] == DATETIME] @property def tags(self): return dict() def calculate_column(self, df, feature): df_columns = df.columns features_to_use = [f for f in self.features if f['uuid'] in df_columns] 
datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] col = feature['uuid'] column_type = feature['column_type'] tags = merge_dict(self.tags, dict(column_type=column_type, feature_uuid=col)) increment(f'{DD_KEY}.calculate_column.start', tags) series = df[col] series_cleaned = clean_series(series, column_type) chart_data = [] correlation = [] time_series = [] if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: histogram_data = charts.build_histogram_data(col, series_cleaned, column_type) if histogram_data: chart_data.append(histogram_data) correlation.append(charts.build_correlation_data(df, col, features_to_use)) if column_type in [ CATEGORY, CATEGORY_HIGH_CARDINALITY, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ]: time_series = [] for f in datetime_features_to_use: time_series_chart = charts.build_time_series_data(df, feature, f['uuid'], column_type) if time_series_chart: time_series.append(time_series_chart) increment(f'{DD_KEY}.calculate_column.succeeded', tags) return { 'feature': feature, DATA_KEY_CHARTS: chart_data, DATA_KEY_CORRELATION: correlation, DATA_KEY_TIME_SERIES: time_series, } mage_ai/data_cleaner/analysis/__init__.py METASEP cleaning/__init__.py METASEP mage_ai/server/hello.py METASEP from flask import Flask app = Flask(__name__) @app.route("/") def hello_world(): return "<p>Hello, World!</p>" mage_ai/data_cleaner/data_cleaner.py METASEP from data_cleaner import column_type_detector from data_cleaner.analysis.calculator import AnalysisCalculator from data_cleaner.pipelines.base import BasePipeline from data_cleaner.shared.hash import merge_dict from data_cleaner.statistics.calculator import StatisticsCalculator def clean(df): cleaner = DataCleaner() return cleaner.clean(df) class DataCleaner(): def analyze(self, df): """ Analyze a dataframe 1. Detect column types 2. Calculate statistics 3. 
Calculate analysis """ column_types = column_type_detector.infer_column_types(df) statistics = StatisticsCalculator(column_types).process(df) analysis = AnalysisCalculator(df, column_types).process(df) return dict( analysis=analysis, column_types=column_types, statistics=statistics, ) def clean(self, df): df_stats = self.analyze(df) pipeline = BasePipeline() suggested_actions = pipeline.create_actions( df, df_stats['column_types'], df_stats['statistics'], ) df_cleaned = pipeline.transform(df) return merge_dict(df_stats, dict( df_cleaned=df_cleaned, suggested_actions=suggested_actions, )) mage_ai/data_cleaner/column_type_detector.py METASEP from data_cleaner.shared.array import subtract import numpy as np import re import warnings DATETIME_MATCHES_THRESHOLD = 0.5 MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES = 40 CATEGORY = 'category' CATEGORY_HIGH_CARDINALITY = 'category_high_cardinality' DATETIME = 'datetime' EMAIL = 'email' NUMBER = 'number' NUMBER_WITH_DECIMALS = 'number_with_decimals' PHONE_NUMBER = 'phone_number' TEXT = 'text' TRUE_OR_FALSE = 'true_or_false' ZIP_CODE = 'zip_code' NUMBER_TYPES = [NUMBER, NUMBER_WITH_DECIMALS] STRING_TYPES = [EMAIL, PHONE_NUMBER, TEXT, ZIP_CODE] COLUMN_TYPES = [ CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, EMAIL, NUMBER, NUMBER_WITH_DECIMALS, PHONE_NUMBER, TEXT, TRUE_OR_FALSE, ZIP_CODE, ] REGEX_DATETIME_PATTERN = r'^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}$|^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}[Tt ]{1}[\d]{1,2}:[\d]{1,2}[:]{0,1}[\d]{1,2}[\.]{0,1}[\d]*|^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$|^\d{1,4}[-\/]{1}\d{1,2}[-\/]{1}\d{1,4}$|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})[\s,]+(\d{2,4})' REGEX_EMAIL_PATTERN = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$" REGEX_EMAIL = re.compile(REGEX_EMAIL_PATTERN) REGEX_INTEGER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+$' REGEX_INTEGER = re.compile(REGEX_INTEGER_PATTERN) REGEX_NUMBER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+\.[0-9]*%{0,1}$|^[\-]{0,1}[\$]{0,1}[0-9,]+%{0,1}$' REGEX_NUMBER = re.compile(REGEX_NUMBER_PATTERN) REGEX_PHONE_NUMBER_PATTERN = r'^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. 
]*(\d{4})(?: *x(\d+))?\s*$' REGEX_PHONE_NUMBER = re.compile(REGEX_PHONE_NUMBER_PATTERN) REGEX_ZIP_CODE_PATTERN = r'^\d{3,5}(?:[-\s]\d{4})?$' REGEX_ZIP_CODE = re.compile(REGEX_ZIP_CODE_PATTERN) def get_mismatched_row_count(series, column_type): mismatched_rows = 0 if column_type == EMAIL: mismatched_rows = len( series[~series.str.contains(REGEX_EMAIL)].index, ) elif column_type == PHONE_NUMBER: mismatched_rows = len( series[~series.str.contains(REGEX_PHONE_NUMBER)].index, ) elif column_type == ZIP_CODE: mismatched_rows = len( series[~series.str.contains(REGEX_ZIP_CODE)].index, ) return mismatched_rows def infer_column_types(df, **kwargs): binary_feature_names = [] category_feature_names = [] datetime_feature_names = [] email_features = [] float_feature_names = [] integer_feature_names = [] non_number_feature_names = [] phone_number_feature_names = [] text_feature_names = [] zip_code_feature_names = [] for idx, col_type in enumerate(df.dtypes): col_name = df.columns[idx] if 'datetime64' in str(col_type): datetime_feature_names.append(col_name) elif col_type == 'object': df_sub = df[col_name].copy() df_sub = df_sub.replace('^\s+$', np.nan, regex=True) df_sub = df_sub.dropna() df_sub = df_sub.apply(lambda x: x.strip() if type(x) is str else x) if df_sub.empty: non_number_feature_names.append(col_name) else: first_item = df_sub.iloc[0] if type(first_item) is list: text_feature_names.append(col_name) elif type(first_item) is bool or type(first_item) is np.bool_: if len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: category_feature_names.append(col_name) elif len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: df_sub = df_sub.astype(str) incorrect_emails = len( df_sub[~df_sub.str.contains(REGEX_EMAIL)].index, ) warnings.filterwarnings('ignore', 'This pattern has match groups') incorrect_phone_numbers = len( df_sub[~df_sub.str.contains(REGEX_PHONE_NUMBER)].index, ) incorrect_zip_codes = len( df_sub[~df_sub.str.contains(REGEX_ZIP_CODE)].index, ) if all(df_sub.str.contains(REGEX_INTEGER)): integer_feature_names.append(col_name) elif all(df_sub.str.contains(REGEX_NUMBER)): float_feature_names.append(col_name) elif incorrect_emails / len(df_sub.index) <= 0.99: email_features.append(col_name) elif incorrect_phone_numbers / len(df_sub.index) <= 0.99: phone_number_feature_names.append(col_name) elif incorrect_zip_codes / len(df_sub.index) <= 0.99: zip_code_feature_names.append(col_name) else: non_number_feature_names.append(col_name) elif col_type == 'bool': binary_feature_names.append(col_name) elif np.issubdtype(col_type, np.floating): float_feature_names.append(col_name) elif np.issubdtype(col_type, np.integer): df_sub = df[col_name].copy() df_sub = df_sub.dropna() if df_sub.min() >= 100 and df_sub.max() <= 99999 and 'zip' in col_name.lower(): zip_code_feature_names.append(col_name) else: integer_feature_names.append(col_name) number_feature_names = float_feature_names + integer_feature_names binary_feature_names += \ [col for col in number_feature_names if df[col].nunique(dropna=False) == 2] binary_feature_names += \ [col for col in non_number_feature_names if df[col].nunique(dropna=False) == 2] float_feature_names = [col for col in float_feature_names if col not in binary_feature_names] integer_feature_names = \ [col for col in integer_feature_names if col not in binary_feature_names] for col_name in subtract(non_number_feature_names, binary_feature_names): df_drop_na = df[col_name].dropna() if df_drop_na.empty: text_feature_names.append(col_name) 
else: matches = df_drop_na.astype(str).str.contains(REGEX_DATETIME_PATTERN) matches = matches.where(matches == True).dropna() if type(df_drop_na.iloc[0]) is list: text_feature_names.append(col_name) elif len(df_drop_na[matches.index]) / len(df_drop_na) >= DATETIME_MATCHES_THRESHOLD: datetime_feature_names.append(col_name) elif df_drop_na.nunique() / len(df_drop_na) >= 0.8: text_feature_names.append(col_name) else: word_count, _ = \ df[col_name].dropna().map(lambda x: (len(str(x).split(' ')), str(x))).max() if word_count > MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES: text_feature_names.append(col_name) else: category_feature_names.append(col_name) low_cardinality_category_feature_names = \ [col for col in category_feature_names if df[col].nunique() <= kwargs.get( 'category_cardinality_threshold', 255, )] high_cardinality_category_feature_names = \ [col for col in category_feature_names if col not in low_cardinality_category_feature_names] column_types = {} array_types_mapping = { CATEGORY: low_cardinality_category_feature_names, CATEGORY_HIGH_CARDINALITY: high_cardinality_category_feature_names, DATETIME: datetime_feature_names, EMAIL: email_features, NUMBER: integer_feature_names, NUMBER_WITH_DECIMALS: float_feature_names, PHONE_NUMBER: phone_number_feature_names, TEXT: text_feature_names, TRUE_OR_FALSE: binary_feature_names, ZIP_CODE: zip_code_feature_names, } for col_type, arr in array_types_mapping.items(): for col in arr: column_types[col] = col_type return column_types mage_ai/data_cleaner/__init__.py METASEP mage_ai/__init__.py METASEP mage_ai/tests/data_cleaner/transformer_actions/test_variable_replacer.py METASEP from data_cleaner.transformer_actions.variable_replacer import interpolate, replace_true_false from tests.base_test import TestCase from tests.data_cleaner.transformer_actions.shared import TEST_ACTION class VariableReplacerTests(TestCase): def test_interpolate(self): text = TEST_ACTION['action_code'] key1 = '1_1' variable_data1 = TEST_ACTION['action_variables'][key1] key2 = '1_2' variable_data2 = TEST_ACTION['action_variables'][key2] key3 = '1' variable_data3 = TEST_ACTION['action_variables'][key3] self.assertEqual( interpolate( interpolate(interpolate(text, key1, variable_data1), key2, variable_data2), key3, variable_data3, ), 'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")', ) def test_replace_true_false(self): action_code = 'a == false and b == true or (a == true and b == false) and ' \ 'a == False and b == True or a == "true" and b == "false" or ' \ "a == 'false' and b == 'true' or a == 'True' and b == 'False'" result = 'a == False and b == True or (a == True and b == False) and ' \ 'a == False and b == True or a == "true" and b == "false" or ' \ "a == 'false' and b == 'true' or a == 'True' and b == 'False'" self.assertEqual(replace_true_false(action_code), result) mage_ai/tests/data_cleaner/transformer_actions/test_utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis from data_cleaner.transformer_actions.utils import columns_to_remove from tests.base_test import TestCase class UtilsTests(TestCase): def test_columns_to_remove(self): transformer_actions = [ dict( action_type=ActionType.FILTER, axis=Axis.COLUMN, action_arguments=['wand'], ), dict( action_type=ActionType.REMOVE, axis=Axis.ROW, action_arguments=['spear'], ), dict( action_type=ActionType.REMOVE, axis=Axis.COLUMN, action_arguments=['sword'], ), ] self.assertEqual(columns_to_remove(transformer_actions), ['sword']) 
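# ---------------------------------------------------------------------------
# Hedged usage sketch (an illustrative addition, not an original fixture of
# this repository): a minimal demonstration of
# data_cleaner.column_type_detector.infer_column_types, defined earlier in this
# repo. The sample dataframe and the expected mapping below are assumptions
# made for illustration; they follow the detection rules in
# mage_ai/data_cleaner/column_type_detector.py but are not asserted anywhere.
if __name__ == '__main__':
    import pandas as pd

    from data_cleaner.column_type_detector import infer_column_types

    sample_df = pd.DataFrame({
        'price': [9.99, 14.50, 3.25],      # float dtype -> 'number_with_decimals'
        'quantity': [1, 4, 2],             # integer dtype, not zip-like -> 'number'
        'is_member': [True, False, True],  # bool dtype -> 'true_or_false'
    })
    print(infer_column_types(sample_df))
    # Expected under the rules above:
    # {'quantity': 'number', 'price': 'number_with_decimals', 'is_member': 'true_or_false'}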
mage_ai/tests/data_cleaner/transformer_actions/test_row.py METASEP from data_cleaner.transformer_actions.base import BaseAction from data_cleaner.transformer_actions.row import ( drop_duplicates, # explode, filter_rows, sort_rows, ) from pandas.util.testing import assert_frame_equal from tests.base_test import TestCase import numpy as np import pandas as pd class RowTests(TestCase): def test_drop_duplicates(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], [1, True, 'c'], [0, True, 'd'], [1, True, 'b'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ (dict(action_arguments=['integer']), df.iloc[[3, 4]]), (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]), (dict(action_arguments=['boolean']), df.iloc[[0, 4]]), (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]), (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 3, 4]]), (dict(action_arguments=[]), df.iloc[[0, 2, 3, 4]]), ] for action, val in test_cases: self.assertTrue(drop_duplicates(df, action).equals(val)) # def test_explode(self): # df = pd.DataFrame([ # ['(a, b, c)'], # ['[b, c, d]'], # [' e, f '], # ], columns=['tags']) # action = dict( # action_arguments=['tags'], # action_options={ # 'separator': ',', # }, # outputs=[ # dict( # uuid='tag', # column_type='text', # ), # ], # ) # df_new = explode(df, action) # df_expected = pd.DataFrame([ # ['a', '(a, b, c)'], # ['b', '(a, b, c)'], # ['c', '(a, b, c)'], # ['b', '[b, c, d]'], # ['c', '[b, c, d]'], # ['d', '[b, c, d]'], # ['e', ' e, f '], # ['f', ' e, f '], # ], columns=['tag', 'tags']) # assert_frame_equal(df_new.reset_index(drop=True), df_expected) def test_filter_rows(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ ([0, False, 'a'], 'integer == 0'), ([0, False, 'a'], 'string == \'a\''), ([1, True, 'b'], 'boolean == True'), ([1, True, 'b'], 'integer >= 1'), ([1, True, 'b'], 'integer >= 1 and boolean == True'), ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \'b\')'), ] for val, query in test_cases: self.assertEqual( val, filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(), ) def test_filter_rows_is_null(self): df = pd.DataFrame([ [None, False, 'a'], [2, True, 'b'], [3, False, 'c'], [1, None, 'a'], [2, True, 'b'], [3, '', 'c'], [1, False, None], [2, True, 'b'], [3, False, ''], ], columns=[ 'integer', 'boolean', 'string', ]) integer_rows = filter_rows( df, dict(action_code='integer == null'), original_df=df, ).values.tolist() self.assertEqual(len(integer_rows), 1) self.assertEqual(integer_rows[0][1], False) self.assertEqual(integer_rows[0][2], 'a') boolean_rows = filter_rows( df, dict(action_code='boolean == null'), original_df=df, ).values.tolist() self.assertEqual(len(boolean_rows), 2) self.assertEqual(boolean_rows[0][0], 1.0) self.assertEqual(boolean_rows[0][1], None) self.assertEqual(boolean_rows[0][2], 'a') self.assertEqual(boolean_rows[1][0], 3.0) self.assertEqual(boolean_rows[1][1], '') self.assertEqual(boolean_rows[1][2], 'c') string_rows = filter_rows( df, dict(action_code='string == null'), original_df=df, ).values.tolist() self.assertEqual(len(string_rows), 2) self.assertEqual(string_rows[0][0], 1.0) self.assertEqual(string_rows[0][1], False) self.assertEqual(string_rows[0][2], None) self.assertEqual(string_rows[1][0], 3.0) self.assertEqual(string_rows[1][1], False) self.assertEqual(string_rows[1][2], '') def test_filter_rows_is_not_null(self): df = pd.DataFrame([ 
[None, False, 'a'], [2, True, 'b'], [3, False, 'c'], [1, None, 'a'], [2, True, 'b'], [3, '', 'c'], [1, False, None], [2, True, 'b'], [3, False, ''], ], columns=[ 'integer', 'boolean', 'string', ]) integer_rows = filter_rows( df, dict(action_code='integer != null'), original_df=df, )['integer'].values.tolist() self.assertEqual(integer_rows, [ 2, 3, 1, 2, 3, 1, 2, 3, ]) boolean_rows = filter_rows( df, dict(action_code='boolean != null'), original_df=df, )['boolean'].values.tolist() self.assertEqual(boolean_rows, [ False, True, False, True, False, True, False, ]) string_rows = filter_rows( df, dict(action_code='string != null'), original_df=df, )['string'].values.tolist() self.assertEqual(string_rows, [ 'a', 'b', 'c', 'a', 'b', 'c', 'b', ]) def test_filter_row_contains_string(self): df = pd.DataFrame([ ['fsdijfosidjfiosfj'], ['[email protected]'], [np.NaN], ['fsdfsdfdsfdsf'], ['[email protected]'], ], columns=[ 'id', ]) action = dict( action_code='id contains @', ) action2 = dict( action_code='id contains \'@\'', ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_expected = pd.DataFrame([ ['[email protected]'], ['[email protected]'], ], columns=[ 'id', ]) assert_frame_equal(df_new, df_expected) assert_frame_equal(df_new2, df_expected) def test_filter_row_not_contains_string(self): df = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['[email protected]', False], ['eeeeasdf', True] ], columns=[ 'email', 'subscription' ]) action = dict( action_code='email not contains mailnet', ) action2 = dict( action_code='email not contains \'mailnet\'', ) action3 = dict( action_code = 'email not contains @', ) action4 = dict( action_code = 'email not contains \'^e+\w\'', ) action_invalid = dict( action_code='subscription not contains False' ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True) df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True) df_expected1 = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['eeeeasdf', True] ], columns=[ 'email', 'subscription' ]) df_expected2 = pd.DataFrame([ [np.NaN, False], ['fsdfsdfdsfdsf', False], ['eeeeasdf', True] ], columns=[ 'email', 'subscription' ]) df_expected3 = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['[email protected]', False] ], columns=[ 'email', 'subscription' ]) assert_frame_equal(df_new, df_expected1) assert_frame_equal(df_new2, df_expected1) assert_frame_equal(df_new3, df_expected2) assert_frame_equal(df_new4, df_expected3) with self.assertRaises(Exception): _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True) def test_filter_rows_multi_condition(self): df = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) action = dict(action_code='(value < 110 and value >= 50) and (value != null)') action2 = dict(action_code='brand contains brand and inventory != null') action3 = dict(action_code='(brand != null and value > 60) or (discounted == null)') action4 = dict( action_code='(discounted == True 
and inventory > 15)' ' or (discounted == False and value != null)' ) action5 = dict( action_code='(brand not contains company and value == 75 and inventory <= 80)' ' or (discounted != null)' ) df_expected = pd.DataFrame( [ [100, None, '', 10], [50, 'brand1', True, 13], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected2 = pd.DataFrame( [ [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected3 = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected4 = pd.DataFrame( [ [250, 'brand1', False, np.NaN], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected5 = pd.DataFrame( [ [250, 'brand1', False, np.NaN], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True) df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True) df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True) df_new['value'] = df_new['value'].astype(int) df_new['inventory'] = df_new['inventory'].astype(int) df_new2['brand'] = df_new2['brand'].astype(str) df_new2['inventory'] = df_new2['inventory'].astype(int) df_new4['value'] = df_new4['value'].astype(int) df_new4['brand'] = df_new4['brand'].astype(str) df_new4['discounted'] = df_new4['discounted'].astype(bool) assert_frame_equal(df_expected, df_new) assert_frame_equal(df_expected2, df_new2) assert_frame_equal(df_expected3, df_new3) assert_frame_equal(df_expected4, df_new4) assert_frame_equal(df_expected5, df_new5) def test_filter_row_implicit_null(self): # tests that implicit null values in the transformed dataframe are still removed df = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) action_payload = { 'action_type': 'filter', 'action_code': '%{1} != null', 'action_arguments': [], 'action_options': {}, 'axis': 'row', 'action_variables': { '1': { 'id': 'value', 'type': 'feature', 'feature': { 'column_type': 'number', 'uuid': 'value' } }, }, 'outputs': [] } action = BaseAction(action_payload) df_new = action.execute(df).reset_index(drop=True) df_expected = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [50, 'brand1', True, 13], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_new['value'] = df_new['value'].astype(int) assert_frame_equal(df_expected, df_new) def test_original_df_column_name_padding(self): # tests edge cases for when columns with the special prefixes "orig_" and "tf_" are given as input df = pd.DataFrame([ [0,1, None], [1,2, np.NaN], [np.NaN, 3, 4], [3, None, 5] ], columns=[ 'col', 'orig_col', 'tf_col' ]) df_expected = pd.DataFrame([ [0,1, None], [1,2, np.NaN], ], columns=[ 'col', 'orig_col', 'tf_col' ]) action = dict(action_code='(col != null) and (orig_col != null)') df_new = filter_rows(df, action, original_df = df) df_new['col'] = df_new['col'].astype(int) df_new['orig_col'] = df_new['orig_col'].astype(int) assert_frame_equal(df_new, df_expected) 
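# The null-comparison tests above pass original_df because query_with_action_code
# joins the pre-transform frame under the 'orig_' prefix (working columns get the
# 'tf_' prefix), and for string and boolean columns it treats empty strings as
# missing in addition to NaN. The remaining tests below exercise sort_rows
# ordering, including number columns that mix None and empty-string values.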
def test_sort_rows(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], [1, True, 'c'], [0, True, 'd'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ (dict(action_arguments=['integer']), df.iloc[[0, 3, 1, 2]]), (dict(action_arguments=['integer'], action_options=dict(ascending=False)), df.iloc[[1, 2, 0, 3]]), (dict(action_arguments=['string']), df.iloc[[0, 1, 2, 3]]), (dict(action_arguments=['string'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]), ] for action, val in test_cases: self.assertTrue(sort_rows(df, action).equals(val)) def test_sort_rows_with_multiple_columns(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], [1, True, 'c'], [0, True, 'd'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ (dict(action_arguments=['integer', 'string']), df.iloc[[0, 3, 1, 2]]), (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[False, False])), df.iloc[[2, 1, 3, 0]]), (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[True, False])), df.iloc[[3, 0, 2, 1]]), (dict(action_arguments=['string', 'integer'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]), ] for action, val in test_cases: self.assertTrue(sort_rows(df, action).equals(val)) def test_sort_rows_with_number_and_empty_strings(self): df = pd.DataFrame([ [0], [None], [3], [''], [1], [2], ], columns=[ 'integer', ]) test_cases = [ (dict(ascending=True), df.iloc[[1, 3, 0, 4, 5, 2]]), (dict(ascending=False), df.iloc[[2, 5, 4, 0, 1, 3]]), ] for action_options, val in test_cases: action = dict( action_arguments=['integer'], action_variables={ '1': dict( feature=dict( column_type='number', uuid='integer', ), ), }, action_options=action_options, ) self.assertTrue(sort_rows(df, action).equals(val)) mage_ai/tests/data_cleaner/transformer_actions/test_helpers.py METASEP from data_cleaner.transformer_actions.helpers import extract_join_feature_set_version_id from tests.base_test import TestCase class ColumnTests(TestCase): def test_extract_join_feature_set_version_id(self): payload1 = dict( action_type='join', action_arguments=[100], action_options=dict( left_on=['user_id'], right_on=['id'], ), ) payload2 = dict( action_type='join', action_arguments=['%{1}'], action_options=dict( left_on=['user_id'], right_on=['id'], ), action_variables={ '1': { 'id': 200, 'type': 'feature_set_version', }, }, ) payload3 = dict( action_type='filter', ) fsv_id1 = extract_join_feature_set_version_id(payload1) fsv_id2 = extract_join_feature_set_version_id(payload2) fsv_id3 = extract_join_feature_set_version_id(payload3) self.assertEqual(fsv_id1, 100) self.assertEqual(fsv_id2, 200) self.assertEqual(fsv_id3, None) mage_ai/tests/data_cleaner/transformer_actions/test_column.py METASEP from data_cleaner.transformer_actions.column import ( add_column, count, count_distinct, clean_column_name, diff, # expand_column, first, last, remove_column, select, shift_down, shift_up, ) from pandas.util.testing import assert_frame_equal from tests.base_test import TestCase import numpy as np import pandas as pd TEST_DATAFRAME = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'amount', ]) class ColumnTests(TestCase): def test_remove_column(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], ], columns=[ 'integer', 'boolean', 'string', ]) action = dict(action_arguments=['string']) df_new = remove_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( integer=0, boolean=False, ), dict( integer=1, 
boolean=True, ), ]) action = dict(action_arguments=['integer', 'boolean']) df_new = remove_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( string='a', ), dict( string='b', ), ]) def test_add_column_addition(self): df = pd.DataFrame([ [1, 3, 7, 9], [4, 2, 9, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', 'integer3', ], action_options={ 'udf': 'addition', 'value': None, }, outputs=[ dict( uuid='integer_addition', column_type='number', ), ], ) action2 = dict( action_arguments=['integer1'], action_options={ 'udf': 'addition', 'value': 10, }, outputs=[ dict( uuid='integer_addition2', column_type='number', ), ], ) action3 = dict( action_arguments=['integer1', 'integer4'], action_options={ 'udf': 'addition', 'value': 10, }, outputs=[ dict( uuid='integer_addition3', column_type='number', ), ], ) df_new = add_column( add_column( add_column(df, action1), action2, ), action3, ) df_expected = pd.DataFrame([ [1, 3, 7, 9, 11, 11, 20], [4, 2, 9, 3, 15, 14, 17], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_addition', 'integer_addition2', 'integer_addition3', ]) assert_frame_equal(df_new, df_expected) def test_add_column_addition_days(self): df = pd.DataFrame([ ['2021-08-31'], ['2021-08-28'], ], columns=[ 'created_at', ]) action = dict( action_arguments=['created_at'], action_options=dict( column_type='datetime', time_unit='d', udf='addition', value=3, ), outputs=[ dict( uuid='3d_after_creation', column_type='text', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['2021-08-31', '2021-09-03 00:00:00'], ['2021-08-28', '2021-08-31 00:00:00'], ], columns=[ 'created_at', '3d_after_creation' ]) assert_frame_equal(df_new, df_expected) def test_add_column_constant(self): df = pd.DataFrame([ [False], [True], ], columns=[ 'boolean', ]) action = dict( action_arguments=[10], action_options=dict( udf='constant', ), outputs=[ dict( uuid='integer', column_type='number', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( boolean=False, integer=10, ), dict( boolean=True, integer=10, ), ]) def test_add_column_date_trunc(self): df = pd.DataFrame([ ['2021-08-31', False], ['2021-08-28', True], ], columns=[ 'created_at', 'boolean', ]) action = dict( action_arguments=['created_at'], action_options=dict( udf='date_trunc', date_part='week', ), outputs=[ dict( uuid='week_date', column_type='text', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( created_at='2021-08-31', boolean=False, week_date='2021-08-30', ), dict( created_at='2021-08-28', boolean=True, week_date='2021-08-23', ), ]) def test_add_column_difference(self): df = pd.DataFrame([ [1, 3], [4, 2], ], columns=[ 'integer1', 'integer2', ]) action1 = dict( action_arguments=['integer1', 'integer2'], action_options={ 'udf': 'difference', }, outputs=[ dict( uuid='integer_difference', column_type='number', ), ], ) action2 = dict( action_arguments=['integer1'], action_options={ 'udf': 'difference', 'value': 10, }, outputs=[ dict( uuid='integer_difference2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [1, 3, -2, -9], [4, 2, 2, -6], ], columns=[ 'integer1', 'integer2', 'integer_difference', 'integer_difference2' ]) assert_frame_equal(df_new, df_expected) def test_add_column_difference_days(self): df = pd.DataFrame([ ['2021-08-31', '2021-09-14'], ['2021-08-28', 
'2021-09-03'], ], columns=[ 'created_at', 'converted_at', ]) action = dict( action_arguments=['converted_at', 'created_at'], action_options=dict( column_type='datetime', time_unit='d', udf='difference', ), outputs=[ dict( uuid='days_diff', column_type='number', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['2021-08-31', '2021-09-14', 14], ['2021-08-28', '2021-09-03', 6], ], columns=[ 'created_at', 'converted_at', 'days_diff', ]) assert_frame_equal(df_new, df_expected) def test_add_column_distance_between(self): df = pd.DataFrame([ [26.05308, -97.31838, 33.41939, -112.32606], [39.71954, -84.13056, 33.41939, -112.32606], ], columns=[ 'lat1', 'lng1', 'lat2', 'lng2', ]) action = dict( action_arguments=['lat1', 'lng1', 'lat2', 'lng2'], action_options=dict( udf='distance_between', ), outputs=[ dict( uuid='distance', column_type='number_with_decimals', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( lat1=26.05308, lng1=-97.31838, lat2=33.41939, lng2=-112.32606, distance=1661.8978520305657, ), dict( lat1=39.71954, lng1=-84.13056, lat2=33.41939, lng2=-112.32606, distance=2601.5452571116184, ), ]) def test_add_column_divide(self): df = pd.DataFrame([ [12, 3, 70, 9], [4, 2, 90, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', ], action_options={ 'udf': 'divide', }, outputs=[ dict( uuid='integer_divide', column_type='number', ), ], ) action2 = dict( action_arguments=['integer3'], action_options={ 'udf': 'divide', 'value': 10, }, outputs=[ dict( uuid='integer_divide2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [12, 3, 70, 9, 4, 7], [4, 2, 90, 3, 2, 9], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_divide', 'integer_divide2' ]) assert_frame_equal(df_new, df_expected) # def test_add_column_extract_dict_string(self): # df = pd.DataFrame([ # '{\'country\': \'US\', \'age\': \'20\'}', # '{\'country\': \'CA\'}', # '{\'country\': \'UK\', \'age\': \'24\'}', # '', # ], columns=[ # 'properties', # ]) # action = dict( # action_arguments=['properties', 'country'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_country', # column_type='text', # ), # ], # ) # df_new = add_column(df, action) # self.assertEqual(df_new.to_dict(orient='records'), [ # dict( # properties='{\'country\': \'US\', \'age\': \'20\'}', # property_country='US', # ), # dict( # properties='{\'country\': \'CA\'}', # property_country='CA', # ), # dict( # properties='{\'country\': \'UK\', \'age\': \'24\'}', # property_country='UK', # ), # dict( # properties='', # property_country=np.NaN, # ), # ]) # action2 = dict( # action_arguments=['properties', 'age'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_age', # column_type='number', # ), # ], # ) # df_new2 = add_column(df, action2) # self.assertEqual(df_new2.to_dict(orient='records'), [ # dict( # properties='{\'country\': \'US\', \'age\': \'20\'}', # property_age=20, # ), # dict( # properties='{\'country\': \'CA\'}', # property_age=0, # ), # dict( # properties='{\'country\': \'UK\', \'age\': \'24\'}', # property_age=24, # ), # dict( # properties='', # property_age=0, # ), # ]) # def test_add_column_extract_dict_string_with_json(self): # df = pd.DataFrame([ # '{\"country\": \"US\", \"is_adult\": true}', # '{\"country\": \"CA\"}', # '{\"country\": \"UK\", \"is_adult\": 
false}', # '', # ], columns=[ # 'properties', # ]) # action = dict( # action_arguments=['properties', 'country'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_country', # column_type='text', # ), # ], # ) # df_new = add_column(df, action) # self.assertEqual(df_new.to_dict(orient='records'), [ # dict( # properties='{\"country\": \"US\", \"is_adult\": true}', # property_country='US', # ), # dict( # properties='{\"country\": \"CA\"}', # property_country='CA', # ), # dict( # properties='{\"country\": \"UK\", \"is_adult\": false}', # property_country='UK', # ), # dict( # properties='', # property_country=np.NaN, # ), # ]) # action2 = dict( # action_arguments=['properties', 'is_adult'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_is_adult', # column_type='true_or_false', # ), # ], # ) # df_new2 = add_column(df, action2) # self.assertEqual(df_new2.to_dict(orient='records'), [ # dict( # properties='{\"country\": \"US\", \"is_adult\": true}', # property_is_adult=True, # ), # dict( # properties='{\"country\": \"CA\"}', # property_is_adult=None, # ), # dict( # properties='{\"country\": \"UK\", \"is_adult\": false}', # property_is_adult=False, # ), # dict( # properties='', # property_is_adult=None, # ), # ]) def test_add_column_formatted_date(self): df = pd.DataFrame([ ['2019-04-10 08:20:58', False], ['2019-03-05 03:30:30', True], ], columns=[ 'created_at', 'boolean', ]) action = dict( action_arguments=['created_at'], action_options=dict( udf='formatted_date', format='%Y-%m-%d', ), outputs=[ dict( uuid='created_date', column_type='text', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( created_at='2019-04-10 08:20:58', boolean=False, created_date='2019-04-10', ), dict( created_at='2019-03-05 03:30:30', boolean=True, created_date='2019-03-05', ), ]) def test_add_column_if_else(self): df = pd.DataFrame([ ['2019-04-10 08:20:58'], [None], ], columns=[ 'converted_at' ]) action = dict( action_arguments=[False, True], action_code='converted_at == null', action_options=dict( udf='if_else', ), outputs=[ dict( uuid='converted', column_type='true_or_false', ), ], ) df_new = add_column(df, action, original_df=df) self.assertEqual(df_new.to_dict(orient='records'), [ dict( converted_at='2019-04-10 08:20:58', converted=True, ), dict( converted_at=None, converted=False, ), ]) def test_add_column_if_else_with_column(self): df = pd.DataFrame([ ['2019-04-10 08:20:58', 'test_user_id'], [None, None], ], columns=[ 'converted_at', 'user_id', ]) action = dict( action_arguments=['unknown', 'user_id'], action_code='converted_at == null', action_options=dict( udf='if_else', arg1_type='value', arg2_type='column', ), outputs=[ dict( uuid='user_id_clean', column_type='text', ), ], ) df_new = add_column(df, action, original_df=df) self.assertEqual(df_new.to_dict(orient='records'), [ dict( converted_at='2019-04-10 08:20:58', user_id='test_user_id', user_id_clean='test_user_id', ), dict( converted_at=None, user_id=None, user_id_clean='unknown', ), ]) def test_add_column_multiply(self): df = pd.DataFrame([ [1, 3, 7, 9], [4, 2, 9, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', ], action_options={ 'udf': 'multiply', }, outputs=[ dict( uuid='integer_multiply', column_type='number', ), ], ) action2 = dict( action_arguments=['integer3'], action_options={ 'udf': 'multiply', 'value': 10, }, outputs=[ dict( 
uuid='integer_multiply2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [1, 3, 7, 9, 3, 70], [4, 2, 9, 3, 8, 90], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_multiply', 'integer_multiply2' ]) assert_frame_equal(df_new, df_expected) def test_add_column_string_replace(self): df = pd.DataFrame([ ['$1000'], ['$321. '], ['$4,321'], ], columns=[ 'amount', ]) action = dict( action_arguments=['amount'], action_options={ 'udf': 'string_replace', 'pattern': '\\$|\\.|\\,|\\s*', 'replacement': '', }, outputs=[ dict( uuid='amount_clean', column_type='true_or_false', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['$1000', '1000'], ['$321. ', '321'], ['$4,321', '4321'], ], columns=[ 'amount', 'amount_clean', ]) assert_frame_equal(df_new, df_expected) def test_add_column_string_split(self): df = pd.DataFrame([ ['Street1, Long Beach, CA, '], ['Street2,Vernon, CA, 123'], ['Pacific Coast Highway, Los Angeles, CA, 111'], ], columns=[ 'location', ]) action = dict( action_arguments=['location'], action_options={ 'udf': 'string_split', 'separator': ',', 'part_index': 1, }, outputs=[ dict( uuid='location_city', column_type='text', ), ], ) action2 = dict( action_arguments=['location'], action_options={ 'udf': 'string_split', 'separator': ',', 'part_index': 3, }, outputs=[ dict( uuid='num', column_type='number', ), ], ) df_new = add_column(add_column(df, action), action2) df_expected = pd.DataFrame([ ['Street1, Long Beach, CA, ', 'Long Beach', 0], ['Street2,Vernon, CA, 123', 'Vernon', 123], ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111], ], columns=[ 'location', 'location_city', 'num', ]) assert_frame_equal(df_new, df_expected) def test_add_column_substring(self): df = pd.DataFrame([ ['$1000.0'], ['$321.9'], ], columns=[ 'amount', ]) action = dict( action_arguments=['amount'], action_options={ 'udf': 'substring', 'start': 1, 'stop': -2, }, outputs=[ dict( uuid='amount_int', column_type='text', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['$1000.0', '1000'], ['$321.9', '321'], ], columns=[ 'amount', 'amount_int', ]) assert_frame_equal(df_new, df_expected) def test_average(self): from data_cleaner.transformer_actions.column import average action = self.__groupby_agg_action('average_amount') df_new = average(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1050], [2, 1050, 1100], [1, 1100, 1050], [2, 1150, 1100], ], columns=[ 'group_id', 'amount', 'average_amount' ]) assert_frame_equal(df_new, df_expected) def test_count(self): df = pd.DataFrame([ [1, 1000], [1, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, order_count=3, ), dict( group_id=1, order_id=1050, order_count=3, ), dict( group_id=1, order_id=1100, order_count=3, ), dict( group_id=2, order_id=1150, order_count=1, ), ]) def test_count_distinct(self): df = pd.DataFrame([ [1, 1000], [1, 1000], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='order_count'), ], ) df_new = count_distinct(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, 
order_count=2, ), dict( group_id=1, order_id=1000, order_count=2, ), dict( group_id=1, order_id=1100, order_count=2, ), dict( group_id=2, order_id=1150, order_count=1, ), ]) def test_count_with_time_window(self): df = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01'], [1, 1050, '2021-10-01', '2021-08-01'], [1, 1100, '2021-10-01', '2021-01-01'], [2, 1150, '2021-09-01', '2021-08-01'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) action = dict( action_arguments=['order_id'], action_code='', action_options=dict( groupby_columns=['group_id'], timestamp_feature_a='group_churned_at', timestamp_feature_b='order_created_at', window=90*24*3600, ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, group_churned_at='2021-10-01', order_created_at='2021-09-01', order_count=2, ), dict( group_id=1, order_id=1050, group_churned_at='2021-10-01', order_created_at='2021-08-01', order_count=2, ), dict( group_id=1, order_id=1100, group_churned_at='2021-10-01', order_created_at='2021-01-01', order_count=2, ), dict( group_id=2, order_id=1150, group_churned_at='2021-09-01', order_created_at='2021-08-01', order_count=1, ), ]) def test_count_with_filter(self): df = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01'], [1, 1050, '2021-10-01', '2021-08-01'], [1, 1100, '2021-10-01', '2021-01-01'], [2, 1150, '2021-09-01', '2021-08-01'], [2, 1200, '2021-09-01', '2021-08-16'], [2, 1250, '2021-09-01', '2021-08-14'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) action = dict( action_arguments=['order_id'], action_code='order_created_at < \'2021-08-15\'', action_options=dict( groupby_columns=['group_id'], ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, action) df_expected = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01', 2], [1, 1050, '2021-10-01', '2021-08-01', 2], [1, 1100, '2021-10-01', '2021-01-01', 2], [2, 1150, '2021-09-01', '2021-08-01', 2], [2, 1200, '2021-09-01', '2021-08-16', 2], [2, 1250, '2021-09-01', '2021-08-14', 2], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', 'order_count', ]) assert_frame_equal(df_new, df_expected) def test_clean_column_name(self): df = pd.DataFrame([ ['', '', '', '', '', '', '', '', ''] ], columns=[ 'good_name', 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 23', 'true', 'true_crime', '@#f$%&*o$*(%^&r*$%&' ] ) expected_df = pd.DataFrame([ ['', '', '', '', '', '', '', '', ''] ], columns=[ 'good_name', 'bad_case', 'number_34234342', 'yield_', 'number_12342', '1234___23', 'true_', 'true_crime', 'for_' ] ) action = dict( action_type='clean_column_name', action_arguments=[ 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 
23', 'true', '@#f$%&*o$*(%^&r*$%&' ], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ) new_df = clean_column_name(df, action) assert_frame_equal(new_df, expected_df) def test_diff(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='sold_diff'), ], ) df_new = diff(df, action) self.assertEqual(df_new.to_dict(orient='records')[1:], [ dict( date='2020-01-02', sold=1050, sold_diff=50, ), dict( date='2020-01-03', sold=1200, sold_diff=150, ), dict( date='2020-01-04', sold=990, sold_diff=-210, ), ]) # def test_expand_column(self): # df = pd.DataFrame([ # [1, 'game'], # [1, 'book'], # [1, 'game'], # [2, 'Video Game'], # [1, 'Video Game'], # [2, 'book'], # [1, 'Video Game'], # [2, 'Video Game'], # ], columns=[ # 'group_id', # 'category', # ]) # action = dict( # action_arguments=['category'], # action_options=dict( # groupby_columns=['group_id'] # ), # outputs=[ # dict(uuid='category_expanded_count_game'), # dict(uuid='category_expanded_count_book'), # dict(uuid='category_expanded_count_video_game'), # dict(uuid='category_expanded_count_clothing'), # ], # ) # df_new = expand_column(df, action) # df_expected = pd.DataFrame([ # [1, 'game', 2, 1, 2], # [1, 'book', 2, 1, 2], # [1, 'game', 2, 1, 2], # [2, 'Video Game', 0, 1, 2], # [1, 'Video Game', 2, 1, 2], # [2, 'book', 0, 1, 2], # [1, 'Video Game', 2, 1, 2], # [2, 'Video Game', 0, 1, 2], # ], columns=[ # 'group_id', # 'category', # 'category_expanded_count_game', # 'category_expanded_count_book', # 'category_expanded_count_video_game', # ]) # assert_frame_equal(df_new, df_expected) # def test_expand_column_with_time_window(self): # df = pd.DataFrame([ # [1, 'game', '2021-01-02', '2021-01-04'], # [1, 'book', '2021-01-02', '2021-01-04'], # [1, 'game', '2021-01-03', '2021-01-04'], # [2, 'Video Game', '2021-01-01', '2021-01-03'], # [1, 'Video Game', '2021-01-01', '2021-01-04'], # [2, 'book', '2021-01-02', '2021-01-03'], # [1, 'Video Game', '2021-01-03', '2021-01-04'], # [2, 'Video Game', '2020-12-30', '2021-01-03'], # ], columns=[ # 'group_id', # 'category', # 'timestamp1', # 'timestamp2', # ]) # action = dict( # action_arguments=['category'], # action_options=dict( # groupby_columns=['group_id'], # timestamp_feature_a='timestamp2', # timestamp_feature_b='timestamp1', # window=172800, # ), # outputs=[ # dict(uuid='category_expanded_count_game_2d'), # dict(uuid='category_expanded_count_book_2d'), # dict(uuid='category_expanded_count_video_game_2d'), # dict(uuid='category_expanded_count_clothing_2d'), # ], # ) # df_new = expand_column(df, action) # df_expected = pd.DataFrame([ # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1], # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1], # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1], # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1], # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1], # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1], # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1], # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1], # ], columns=[ # 'group_id', # 'category', # 'timestamp1', # 'timestamp2', # 'category_expanded_count_game_2d', # 'category_expanded_count_book_2d', # 'category_expanded_count_video_game_2d', # ]) # assert_frame_equal(df_new, df_expected) def test_first_column(self): df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 
'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='first_order'), ], ) df_new = first(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, first_order=1000, ), dict( group_id=2, order_id=1050, first_order=1050, ), dict( group_id=1, order_id=1100, first_order=1000, ), dict( group_id=2, order_id=1150, first_order=1050, ), ]) def test_impute(self): from data_cleaner.transformer_actions.column import impute df = pd.DataFrame([ ['2020-01-01', 1000, ' ', 800], ['2020-01-02', '', 1200, 700], ['2020-01-03', 1200, np.NaN, 900], ['2020-01-04', np.NaN, ' ', 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) action1 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'value': '0', }, action_variables={ '0': { 'feature': { 'column_type': 'number', 'uuid': 'sold', }, 'type': 'feature', }, '1': { 'feature': { 'column_type': 'number', 'uuid': 'curr_profit', }, 'type': 'feature', }, }, ) action2 = dict( action_arguments=['sold'], action_options={ 'value': '0', }, action_variables={ '0': { 'feature': { 'column_type': 'number', 'uuid': 'sold', }, 'type': 'feature', }, }, ) action3 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'average', }, ) action4 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'median', }, ) action5 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'column', 'value': 'prev_sold', }, ) action_invalid = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'mode', }, ) df_new1 = impute(df.copy(), action1) df_new2 = impute(df.copy(), action2) df_new3 = impute(df.copy(), action3) df_new4 = impute(df.copy(), action4) df_new5 = impute(df.copy(), action5) df_expected1 = pd.DataFrame([ ['2020-01-01', 1000, 0, 800], ['2020-01-02', 0, 1200, 700], ['2020-01-03', 1200, 0, 900], ['2020-01-04', 0, 0, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected2 = pd.DataFrame([ ['2020-01-01', 1000, ' ', 800], ['2020-01-02', 0, 1200, 700], ['2020-01-03', 1200, np.nan, 900], ['2020-01-04', 0, ' ', 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected3 = pd.DataFrame([ ['2020-01-01', 1000, 1250, 800], ['2020-01-02', 1300, 1200, 700], ['2020-01-03', 1200, 1250, 900], ['2020-01-04', 1300, 1250, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected4 = pd.DataFrame([ ['2020-01-01', 1000, 1250, 800], ['2020-01-02', 1200, 1200, 700], ['2020-01-03', 1200, 1250, 900], ['2020-01-04', 1200, 1250, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected5 = pd.DataFrame([ ['2020-01-01', 1000, 800, 800], ['2020-01-02', 700, 1200, 700], ['2020-01-03', 1200, 900, 900], ['2020-01-04', 700, 700, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_new1['sold'] = df_new1['sold'].astype(int) df_new1['curr_profit'] = df_new1['curr_profit'].astype(int) df_new2['sold'] = df_new2['sold'].astype(int) df_new3['sold'] = df_new3['sold'].astype(int) df_new3['curr_profit'] = df_new3['curr_profit'].astype(int) df_new4['sold'] = df_new4['sold'].astype(int) df_new4['curr_profit'] = df_new4['curr_profit'].astype(int) df_new5['sold'] = df_new5['sold'].astype(int) 
df_new5['curr_profit'] = df_new5['curr_profit'].astype(int) assert_frame_equal(df_new1, df_expected1) assert_frame_equal(df_new2, df_expected2) assert_frame_equal(df_new3, df_expected3) assert_frame_equal(df_new4, df_expected4) assert_frame_equal(df_new5, df_expected5) with self.assertRaises(Exception): _ = impute(df.copy(), action_invalid) def test_last_column(self): df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='last_order'), ], ) df_new = last(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, last_order=1100, ), dict( group_id=2, order_id=1050, last_order=1150, ), dict( group_id=1, order_id=1100, last_order=1100, ), dict( group_id=2, order_id=1150, last_order=1150, ), ]) def test_max(self): from data_cleaner.transformer_actions.column import max action = self.__groupby_agg_action('max_amount') df_new = max(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1100], [2, 1050, 1150], [1, 1100, 1100], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'max_amount', ]) assert_frame_equal(df_new, df_expected) action2 = dict( action_arguments=['amount'], action_options=dict(), outputs=[ dict(uuid='max_amount'), ], ) df_new2 = max(TEST_DATAFRAME.copy(), action2) df_expected2 = pd.DataFrame([ [1, 1000, 1150], [2, 1050, 1150], [1, 1100, 1150], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'max_amount', ]) assert_frame_equal(df_new2, df_expected2) def test_median(self): from data_cleaner.transformer_actions.column import median action = self.__groupby_agg_action('median_amount') df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1550], [2, 1150], ], columns=[ 'group_id', 'amount', ]) df_new = median(df, action) df_expected = pd.DataFrame([ [1, 1000, 1050], [2, 1050, 1150], [1, 1100, 1050], [2, 1550, 1150], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'median_amount', ]) assert_frame_equal(df_new, df_expected) def test_min(self): from data_cleaner.transformer_actions.column import min action = self.__groupby_agg_action('min_amount') df_new = min(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1000], [2, 1050, 1050], [1, 1100, 1000], [2, 1150, 1050], ], columns=[ 'group_id', 'amount', 'min_amount', ]) assert_frame_equal(df_new, df_expected) def test_select(self): df = pd.DataFrame([ [1, 1000], [2, 1050], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['group_id'] ) df_new = select(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, ), dict( group_id=2, ), ]) def test_shift_down(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='prev_sold'), ], ) df_new = shift_down(df, action) self.assertEqual(df_new.to_dict(orient='records')[1:], [ dict( date='2020-01-02', sold=1050, prev_sold=1000, ), dict( date='2020-01-03', sold=1200, prev_sold=1050, ), dict( date='2020-01-04', sold=990, prev_sold=1200, ), ]) def test_shift_down_with_groupby(self): df = pd.DataFrame([ [1, '2020-01-01', 1000], [1, '2020-01-02', 1050], [2, '2020-01-03', 1200], [1, '2020-01-04', 990], [2, '2020-01-05', 980], [2, '2020-01-06', 970], [2, '2020-01-07', 960], ], columns=[ 'group_id', 'date', 'sold', ]) action = dict( action_arguments=['sold'], 
action_options=dict( groupby_columns=['group_id'], periods=2, ), outputs=[ dict(uuid='prev_sold'), ], ) df_new = shift_down(df, action) df_expected = pd.DataFrame([ [1, '2020-01-01', 1000, None], [1, '2020-01-02', 1050, None], [2, '2020-01-03', 1200, None], [1, '2020-01-04', 990, 1000], [2, '2020-01-05', 980, None], [2, '2020-01-06', 970, 1200], [2, '2020-01-07', 960, 980], ], columns=[ 'group_id', 'date', 'sold', 'prev_sold', ]) assert_frame_equal(df_new, df_expected) def test_shift_up(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='next_sold'), ], ) df_new = shift_up(df, action) self.assertEqual(df_new.to_dict(orient='records')[:-1], [ dict( date='2020-01-01', sold=1000, next_sold=1050, ), dict( date='2020-01-02', sold=1050, next_sold=1200, ), dict( date='2020-01-03', sold=1200, next_sold=990, ), ]) def test_sum(self): from data_cleaner.transformer_actions.column import sum action = self.__groupby_agg_action('total_amount') df_new = sum(TEST_DATAFRAME.copy(), action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, amount=1000, total_amount=2100, ), dict( group_id=2, amount=1050, total_amount=2200, ), dict( group_id=1, amount=1100, total_amount=2100, ), dict( group_id=2, amount=1150, total_amount=2200, ), ]) def __groupby_agg_action(self, output_col): return dict( action_arguments=['amount'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid=output_col), ], ) mage_ai/tests/data_cleaner/transformer_actions/test_base.py METASEP from data_cleaner.transformer_actions.base import BaseAction from data_cleaner.shared.hash import merge_dict from tests.base_test import TestCase from tests.data_cleaner.transformer_actions.shared import TEST_ACTION import numpy as np import pandas as pd def build_df(): return pd.DataFrame([ [2, False, 5.0], ['$3', False, '$6.0', 1], ['$4,000', None, '$7,000', 200], ['$3', False, '$4.0', 3], ['$4,000', None, 3.0, 4], [5, True, 8000, 5], ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index') class BaseActionTests(TestCase): # def test_execute(self): # df = build_df() # base_action = BaseAction(merge_dict( # TEST_ACTION, # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'), # )) # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [ # ['$3', False, '$6.0'], # ['$4,000', None, '$7,000'], # ]) def test_execute_axis_column(self): df = build_df() base_action = BaseAction(merge_dict( TEST_ACTION, dict( action_arguments=[ '%{1_1}', # '%{3_1}', ], action_type='remove', axis='column', ), )) df_new = base_action.execute(df) self.assertEqual(df_new.values.tolist(), [ [False, 5.0], [False, '$6.0'], [None, '$7,000'], [False, '$4.0'], [None, 3.0], [True, 8000], ]) def test_execute_with_no_columns_to_transform(self): df = build_df() base_action = BaseAction(merge_dict( TEST_ACTION, dict( action_arguments=[ '%{1_1}', ], action_type='remove', axis='column', ), )) raised = False try: base_action.execute(df.drop(columns=['deposited'])) except Exception: raised = True self.assertFalse(raised) def test_groupby(self): df = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ], columns=[ 'store', 'date', 'sold', ]) base_action = BaseAction(dict( action_type='group', action_arguments=['store'], 
action_code='', action_variables=dict(), child_actions=[ dict( action_type='sort', axis='row', action_arguments=['date'], action_code='', action_variables=dict(), ), dict( action_type='diff', action_arguments=['sold'], action_code='', action_variables=dict(), axis='column', outputs=[dict(uuid='sold_diff')] ), dict( action_type='shift_down', action_arguments=['sold'], action_code='', action_variables=dict(), axis='column', outputs=[dict(uuid='prev_sold')] ), ], )) df_new = base_action.execute(df) df_new = df_new.fillna(0) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-01', 1000, 0, 0], ['a', '2020-01-02', 1100, 100, 1000], ['a', '2020-01-03', 1050, -50, 1100], ['b', '2020-01-03', 1200, 0, 0], ['b', '2020-01-04', 990, -210, 1200], ]) def test_hydrate_action(self): base_action = BaseAction(TEST_ACTION) base_action.hydrate_action() hydrated_action = TEST_ACTION.copy() hydrated_action['action_code'] = \ 'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")' hydrated_action['action_arguments'] = [ 'omni.deposited', 'magic.spell', ] hydrated_action['action_options'] = dict( condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000', default=0, timestamp_feature_a='omni.fund', timestamp_feature_b='omni.delivered_at', window=2592000, ) self.assertEqual(base_action.action, hydrated_action) def test_hydrate_action_when_adding_column(self): base_action = BaseAction(merge_dict(TEST_ACTION, dict( action_type='add', axis='column', ))) base_action.hydrate_action() hydrated_action = TEST_ACTION.copy() hydrated_action['action_code'] = \ 'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")' hydrated_action['action_type'] = 'add' hydrated_action['axis'] = 'column' hydrated_action['action_arguments'] = [ 'omni.deposited', 'magic.spell', ] hydrated_action['action_options'] = dict( condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000', default=0, timestamp_feature_a='omni.fund', timestamp_feature_b='omni.delivered_at', window=2592000, ) self.assertEqual(base_action.action, hydrated_action) def test_join(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A'], ['b', 'Store B'], ], columns=[ 'store_name', 'description', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'Store A'], ['a', '2020-01-01', 1000, 'Store A'], ['b', '2020-01-04', 990, 'Store B'], ['a', '2020-01-02', 1100, 'Store A'], ['b', '2020-01-03', 1200, 'Store B'], ['c', '2020-01-07', 1250, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', ]) def test_join_rename_column(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A', '2020-02-01'], ['b', 'Store B', '2020-02-02'], ], columns=[ 'store_name', 
'description', 'date', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), outputs=[ { 'source_feature': { 'uuid': 'store_name', }, 'uuid': 'store_name', }, { 'source_feature': { 'uuid': 'description', }, 'uuid': 'description', }, { 'source_feature': { 'uuid': 'date', }, 'uuid': 'date_1', } ] )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'Store A', '2020-02-01'], ['a', '2020-01-01', 1000, 'Store A', '2020-02-01'], ['b', '2020-01-04', 990, 'Store B', '2020-02-02'], ['a', '2020-01-02', 1100, 'Store A', '2020-02-01'], ['b', '2020-01-03', 1200, 'Store B', '2020-02-02'], ['c', '2020-01-07', 1250, np.NaN, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', 'date_1', ]) def test_join_rename_join_key(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A', '2020-02-01'], ['b', 'Store B', '2020-02-02'], ], columns=[ 'store', 'description', 'date', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store'], ), action_variables=dict(), outputs=[ { 'source_feature': { 'uuid': 'store', }, 'uuid': 'store_1', }, { 'source_feature': { 'uuid': 'description', }, 'uuid': 'description', }, { 'source_feature': { 'uuid': 'date', }, 'uuid': 'date_1', } ] )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'a', 'Store A', '2020-02-01'], ['a', '2020-01-01', 1000, 'a', 'Store A', '2020-02-01'], ['b', '2020-01-04', 990, 'b', 'Store B', '2020-02-02'], ['a', '2020-01-02', 1100, 'a', 'Store A', '2020-02-01'], ['b', '2020-01-03', 1200, 'b', 'Store B', '2020-02-02'], ['c', '2020-01-07', 1250, np.NaN, np.NaN, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_1', 'description', 'date_1', ]) def test_join_cast_to_str(self): df1 = pd.DataFrame([ [1, '2020-01-03', 1050], [1, '2020-01-01', 1000], [2, '2020-01-04', 990], [1, '2020-01-02', 1100], [2, '2020-01-03', 1200], [3, '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['1', 'Store A'], ['2', 'Store B'], ], columns=[ 'store_name', 'description', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['1', '2020-01-03', 1050, 'Store A'], ['1', '2020-01-01', 1000, 'Store A'], ['2', '2020-01-04', 990, 'Store B'], ['1', '2020-01-02', 1100, 'Store A'], ['2', '2020-01-03', 1200, 'Store B'], ['3', '2020-01-07', 1250, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', ]) mage_ai/tests/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" 
or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) mage_ai/tests/data_cleaner/transformer_actions/__init__.py METASEP mage_ai/tests/base_test.py METASEP import unittest class TestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass mage_ai/tests/__init__.py METASEP mage_ai/tests/data_cleaner/test_column_type_detector.py METASEP from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, EMAIL, MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES, NUMBER, NUMBER_WITH_DECIMALS, PHONE_NUMBER, TEXT, TRUE_OR_FALSE, ZIP_CODE, get_mismatched_row_count, infer_column_types, ) from tests.base_test import TestCase from faker import Faker import pandas as pd fake = Faker() class ColumnTypeDetectorTests(TestCase): def test_get_mismatched_row_count(self): df = pd.DataFrame([ [1, '[email protected]', '32132'], [2, '[email protected]', '12345'], [3, 'test', '1234'], [4, '[email protected]', 'abcde'], [5, 'abc12345@', '54321'], [6, '[email protected]', '56789'], ], columns=['id', 'email', 'zip_code']) count1 = get_mismatched_row_count(df['id'], 'number') count2 = get_mismatched_row_count(df['email'], 'email') count3 = get_mismatched_row_count(df['zip_code'], 'zip_code') self.assertEqual(count1, 0) self.assertEqual(count2, 2) self.assertEqual(count3, 1) def test_infer_column_types(self): columns = [ 'true_or_false', 'number_with_decimals', 'category', 'datetime', 'text', 'number', 'number_with_dollars', 'number_with_percentage', 'zip_code', 'zip_code_with_3_numbers', 'invalid_zip_code', 'email', 'phone_number', 'datetime_abnormal', 'name', ] table = [ [ '1', 3, 'male', '2020-1-1', '1.0', 1, 3, '30%', '10128-1213', 123, 123, '[email protected]', '123-456-7890', 'May 4, 2021, 6:35 PM', fake.name(), ], [ '1', 12.0, 'female', '2020-07-13', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 2, '$4', '12.32%', 12345, 1234, 1234, '[email protected]', '(123) 456-7890', 'Feb 17, 2021, 2:57 PM', fake.name(), ], [ '1', 0, 'machine', '2020-06-25 01:02', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 3, '$5,000', '50%', '12345', 12345, 12345, '[email protected]', '1234567890', 'Feb 18, 2021, 2:57 PM', fake.name(), ], [ 0, '40.7', 'mutant', '2020-12-25 01:02:03', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 4, '$5,000.01', '20%', '12345', 12345, 123456, '[email protected]', '1234567', 'Feb 19, 2021, 2:57 PM', 
fake.name(), ], [ 0, '40.7', 'alien', '2020-12-25T01:02:03.000Z', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 4, '-$10128,121.3123', '18%', '01234', 12345, 12, '[email protected]', '(123)456-7890', 'Feb 20, 2021, 2:57 PM', fake.name(), ], ] date_formats = [ '01/1/2019', '1/1/2019', '1/21/2019', '11/1/2019', '2020/01/1', '2020/1/01', '2020/1/1', 'Pending', ] for date_format in date_formats: table.append([ 0, '40.7', 'mutant', date_format, fake.text(), 4, '$5,000.01', '15.32%', '01234', 12345, 12, '[email protected]', '(123)456-7890', 'Feb 18, 2021, 2:57 PM', fake.name(), ]) df = pd.DataFrame(table, columns=columns) column_types = infer_column_types(df) self.assertEqual( column_types, { 'true_or_false': TRUE_OR_FALSE, 'number_with_decimals': NUMBER_WITH_DECIMALS, 'category': CATEGORY, 'datetime': DATETIME, 'text': TEXT, 'number': NUMBER, 'number_with_dollars': NUMBER_WITH_DECIMALS, 'number_with_percentage': NUMBER_WITH_DECIMALS, 'zip_code': ZIP_CODE, 'zip_code_with_3_numbers': ZIP_CODE, 'invalid_zip_code': NUMBER, 'email': EMAIL, 'phone_number': PHONE_NUMBER, 'datetime_abnormal': DATETIME, 'name': TEXT, }, ) mage_ai/tests/data_cleaner/__init__.py METASEP mage_ai/tests/data_cleaner/cleaning_rules/test_remove_duplicate_rows.py METASEP from data_cleaner.cleaning_rules.remove_duplicate_rows \ import RemoveDuplicateRows from tests.base_test import TestCase import numpy as np import pandas as pd class RemoveDuplicateRowsTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01'], [2, '2022-01-02'], [3, '2022-01-03'], [2, '2022-01-02'], [4, '2022-01-04'], [5, '2022-01-05'], [3, '2022-01-03'] ], columns=['id', 'deleted_at']) column_types = { 'id': 'number', 'deleted_at': 'datetime', } result = RemoveDuplicateRows( df, column_types, {}, ).evaluate() self.assertEqual(result, [ dict( title='Remove duplicate rows', message='There\'re 2 duplicate rows in the dataset.' ' Suggest to remove them.', action_payload=dict( action_type='drop_duplicate', action_arguments=[], action_code='', action_options={}, action_variables={}, axis='row', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/test_remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from tests.base_test import TestCase import pandas as pd import numpy as np class RemoveColumnWithSingleValueTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01', True], [2, '2022-01-02', True], [3, np.NaN, True], [4, np.NaN, True], [5, np.NaN, True], ], columns=['id', 'deleted_at', 'is_active']) column_types = { 'id': 'number', 'deleted_at': 'datetime', 'is_active': 'true_or_false', } statistics = { 'id/count_distinct': 5, 'deleted_at/count_distinct': 2, 'is_active/count_distinct': 1, } result = RemoveColumnsWithSingleValue(df, column_types, statistics).evaluate() self.assertEqual(result, [ dict( title='Remove columns with single value', message='The following columns have single value in all rows: [\'is_active\'].' 
' Suggest to remove them.', action_payload=dict( action_type='remove', action_arguments=['is_active'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/test_remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from tests.base_test import TestCase import numpy as np import pandas as pd class RemoveColumnWithHighEmptyRateTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01'], [2, np.NaN], [3, np.NaN], [4, np.NaN], [5, np.NaN], ], columns=['id', 'deleted_at']) column_types = { 'id': 'number', 'deleted_at': 'datetime', } statistics = { 'id/null_value_rate': 0, 'deleted_at/null_value_rate': 0.8, } result = RemoveColumnsWithHighEmptyRate( df, column_types, statistics, ).evaluate() self.assertEqual(result, [ dict( title='Remove columns with high empty rate', message='The following columns have high empty rate: [\'deleted_at\'].' ' Removing them may increase your data quality.', action_payload=dict( action_type='remove', action_arguments=['deleted_at'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/test_clean_column_names.py METASEP from data_cleaner.cleaning_rules.clean_column_names import CleanColumnNames from data_cleaner.transformer_actions.constants import ActionType from tests.base_test import TestCase import pandas as pd class CleanColumnNameTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ ['', '', '', '', '', '', '' , '', ''], ], columns=[ 'good_name', 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 23', 'true', 'true_crime', '@#f$%&*o$*(%^&r*$%&' ] ) result = CleanColumnNames( df, {}, {}, ).evaluate() self.assertEqual(result, [ dict( title='Clean dirty column names', message='The following columns have unclean naming conventions: ' '[\'Bad Case\', \'%@#342%34@@#342\', \'yield\',' ' \'12342\', \'1234. 23\', \'true\', \'@#f$%&*o$*(%^&r*$%&\']' '. Making these names lowercase and alphanumeric may improve' 'ease of dataset access and reduce security risks.', action_payload=dict( action_type=ActionType.CLEAN_COLUMN_NAME, action_arguments=[ 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 23', 'true', '@#f$%&*o$*(%^&r*$%&' ], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/__init__.py METASEP mage_ai/tests/data_cleaner/cleaning_rules/test_remove_collinear_columns.py METASEP
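The transformer-action tests above all drive group-level aggregations (count, first, last, max, median, min, sum) that broadcast the aggregate back onto every row of its group. A minimal, hypothetical sketch of that pattern is shown below, assuming the action dict shape used in the tests (action_arguments, action_options.groupby_columns, outputs[0].uuid); it is not the mage_ai implementation, only an illustration of what the assertions expect.

import pandas as pd

def add_group_aggregate(df, action, agg='sum'):
    # Hypothetical helper: broadcast a per-group aggregate back to every row,
    # mirroring what test_sum / test_count / test_max assert.
    source_col = action['action_arguments'][0]                 # column to aggregate
    group_cols = action['action_options']['groupby_columns']   # grouping keys
    output_col = action['outputs'][0]['uuid']                  # name of the new column
    df = df.copy()
    df[output_col] = df.groupby(group_cols)[source_col].transform(agg)
    return df

# Example matching test_sum's expectations:
# df = pd.DataFrame([[1, 1000], [2, 1050], [1, 1100], [2, 1150]],
#                   columns=['group_id', 'amount'])
# add_group_aggregate(df, dict(action_arguments=['amount'],
#                              action_options=dict(groupby_columns=['group_id']),
#                              outputs=[dict(uuid='total_amount')]))
# yields total_amount == [2100, 2200, 2100, 2200]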
[ { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_dirty(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [200, np.nan, 50, None, 75], \n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1500, 75000, np.nan, 70, 25], \n [None, 75000, 30, 70, np.nan], \n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [600, '', 50, 3000, None], \n [700, 11750, 20, 2750, 55],\n [700, None, 20, None, 55], \n [700, 11750, '', 2750, 55], \n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))", "type": "inproject" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], 
columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(", "type": "inproject" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 
45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],", "type": "inproject" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}", "type": "inproject" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n 
}\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 
'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_dirty(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [200, np.nan, 50, None, 75], \n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1500, 75000, np.nan, 70, 25], \n [None, 75000, 30, 70, np.nan], \n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [600, '', 50, 3000, None], \n [700, 11750, 20, 2750, 55],\n [700, None, 20, None, 55], \n [700, 11750, '', 2750, 55], \n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_non_numeric(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, 10, 'cute animal #1', 100, 30],\n [500, 'CA', 10000, 20, 'intro to regression', 3000, 20],\n [200, '', np.nan, 50, 'daily news #1', None, 75],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, 20],\n [1000, 'MX', 45003, 20, 'cute animal #4', 90, 40],\n [1500, 'MX', 75000, 30, '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, 25],\n [None, 'US', 75000, 30, 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, 50, 'cute animal #3', 80, 20],\n [200, 'CA', 5000, 30, '', 10000, 30],\n [800, 'US', 12050, 40, 'meme compilation', 2000, 45],\n [600, 'CA', 11000, 50, 'daily news #2', 3000, 50],\n [600, 'CA', '', 50, '', 3000, None], \n [700, 'MX', 11750, 20, 'cute animal #2', 2750, 55],\n [700, '', None, 20, '', None, 55], \n [700, 'MX', 11750, '', '', 2750, 55], \n [1200, 'MX', 52000, 10, 'vc funding strats', 75, 60]\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_perfectly_collinear(self):\n number_of_users = self.rng.integers(1000, 500000, (10000))\n views = number_of_users * 300\n revenue = 2 * views - number_of_users\n losses = revenue / views + number_of_users\n df = pd.DataFrame({\n 'number_of_users':number_of_users, \n 'views': views, \n 'revenue': revenue, \n 'losses': losses\n })\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset:'\n ' [\\'number_of_users\\', \\'views\\', \\'revenue\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users', 'views', 'revenue'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(result, expected_results)\n\n def test_vif_calcuation(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n expected_vifs_no_remove = (\n 59.32817701051733,\n 26.10502642724925,\n 5.6541251174451315,\n 2.6033835916281176,\n 10.735934980453335\n )\n expected_vifs_remove = (\n 59.32817701051733,\n 2.941751614824833,\n 3.4216357503903243,\n 1.370441833599666\n )\n for column, expected_vif in zip(rule.numeric_columns, expected_vifs_no_remove):\n vif = rule.get_variance_inflation_factor(column)\n self.assertAlmostEqual(vif, expected_vif)\n for column, expected_vif in zip(rule.numeric_columns[:-1], expected_vifs_remove):\n vif = rule.get_variance_inflation_factor(column)\n self.assertAlmostEqual(vif, expected_vif)", "type": "inproject" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)", "type": "commited" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def 
setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)", "type": "non_informative" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_dirty(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [200, np.nan, 50, None, 75], \n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1500, 75000, np.nan, 70, 25], \n [None, 75000, 30, 70, np.nan], \n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [600, '', 50, 3000, None], \n [700, 11750, 20, 2750, 55],\n [700, None, 20, None, 55], \n [700, 11750, '', 2750, 55], \n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n", "type": "non_informative" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):", "type": "non_informative" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n", "type": "non_informative" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 
'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], 
columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',", "type": "random" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, 
statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_dirty(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [200, np.nan, 50, None, 75], \n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1500, 75000, np.nan, 70, 25], \n [None, 75000, 30, 70, np.nan], \n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [600, '', 50, 3000, None], \n [700, 11750, 20, 2750, 55],\n [700, None, 20, None, 55], \n [700, 11750, '', 2750, 55], \n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_non_numeric(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, 10, 'cute animal #1', 100, 30],\n [500, 'CA', 10000, 20, 'intro to regression', 3000, 20],\n [200, '', np.nan, 50, 'daily news #1', None, 75],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, 20],\n [1000, 'MX', 45003, 20, 'cute animal #4', 90, 40],\n [1500, 'MX', 75000, 30, '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, 25],\n [None, 'US', 75000, 30, 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, 50, 'cute animal #3', 80, 20],\n [200, 'CA', 5000, 30, '', 10000, 30],", "type": "random" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 
'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',", "type": "random" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 
'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. '\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 
75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',", "type": "random" }, { "content": "from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns\nfrom tests.base_test import TestCase\nfrom pandas.util.testing import assert_frame_equal\nimport numpy as np\nimport pandas as pd\n\n\nclass RemoveCollinearColumnsTests(TestCase):\n def setUp(self):\n self.rng = np.random.default_rng(42)\n return super().setUp()\n\n def test_categorical_data_frame(self):\n df = pd.DataFrame([\n [1, 1000, '2021-10-01', '2021-09-01'],\n [1, 1050, '2021-10-01', '2021-08-01'],\n [1, 1100, '2021-10-01', '2021-01-01'],\n [2, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'category',\n 'order_id': 'category',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_clean_removes_all_data_frame(self):\n df = pd.DataFrame([\n [None, 1000, '2021-10-01', '2021-09-01'],\n [1, None, '2021-10-01', '2021-08-01'],\n [np.nan, 1100, '2021-10-01', '2021-01-01'],\n [None, 1150, '2021-09-01', '2021-08-01'],\n ], columns=[\n 'group_id',\n 'order_id',\n 'group_churned_at',\n 'order_created_at',\n ])\n column_types = {\n 'group_id': 'number',\n 'order_id': 'number',\n 'group_churned_at': 'datetime',\n 'order_created_at': 'datetime'\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_collinear_no_results(self):\n df = pd.DataFrame([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ], columns=['number_of_users', 'views', 'revenue', 'losses'])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'revenue': 'number',\n 'losses': 'number',\n }\n statistics = {}\n result = RemoveCollinearColumns(df, column_types, statistics).evaluate()\n self.assertEqual(result, [])\n\n def test_evaluate(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_bad_dtypes(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'],\n ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'],\n [200, '', np.nan, 50, 'daily news #1', None, '75'],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'],\n ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'],\n [1500, 'MX', 75000, '30', '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'],\n [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'],\n ['200', 'CA', 5000, '30', '', 10000, '30'],\n [800, 'US', 12050, '40', 'meme compilation', 2000, '45'],\n ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'],\n [600, 'CA', '', 50, '', 3000, None], \n ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'],\n [700, '', None, 20, '', None, '55'], \n [700, 'MX', 11750, '', '', 2750, '55'], \n [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60']\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'location': 'category',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'name': 'text',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_dirty(self):\n df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [200, np.nan, 50, None, 75], \n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1500, 75000, np.nan, 70, 25], \n [None, 75000, 30, 70, np.nan], \n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [600, '', 50, 3000, None], \n [700, 11750, 20, 2750, 55],\n [700, None, 20, None, 55], \n [700, 11750, '', 2750, 55], \n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[\n 'number_of_users',\n 'views',\n 'number_of_creators',\n 'losses',\n 'number_of_advertisers'\n ]).astype(float)\n column_types = {\n 'number_of_users': 'number',\n 'views': 'number',\n 'number_of_creators': 'number',\n 'losses': 'number',\n 'number_of_advertisers': 'number'\n }\n statistics = {}\n rule = RemoveCollinearColumns(df, column_types, statistics)\n assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True))\n results = rule.evaluate()\n expected_results = [\n dict(\n title='Remove collinear columns',\n message='The following columns are strongly correlated '\n 'with other columns in the dataset: [\\'number_of_users\\']. 
'\n 'Removing these columns may increase data quality '\n 'by removing redundant and closely related data.',\n action_payload=dict(\n action_type='remove',\n action_arguments=['number_of_users'],\n axis='column',\n action_options = {},\n action_variables = {},\n action_code = '',\n outputs = [],\n )\n )\n ]\n self.assertEqual(results, expected_results)\n\n def test_evaluate_non_numeric(self):\n df = pd.DataFrame([\n [1000, 'US', 30000, 10, 'cute animal #1', 100, 30],\n [500, 'CA', 10000, 20, 'intro to regression', 3000, 20],\n [200, '', np.nan, 50, 'daily news #1', None, 75],\n [250, 'CA', 7500, 25, 'machine learning seminar', 8000, 20],\n [1000, 'MX', 45003, 20, 'cute animal #4', 90, 40],\n [1500, 'MX', 75000, 30, '', 70, 25],\n [1500, 'US', 75000, np.nan, 'daily news #3', 70, 25],\n [None, 'US', 75000, 30, 'tutorial: how to start a startup', 70, np.nan],\n [1250, 'US', 60000, 50, 'cute animal #3', 80, 20],\n [200, 'CA', 5000, 30, '', 10000, 30],\n [800, 'US', 12050, 40, 'meme compilation', 2000, 45],\n [600, 'CA', 11000, 50, 'daily news #2', 3000, 50],\n [600, 'CA', '', 50, '', 3000, None], \n [700, 'MX', 11750, 20, 'cute animal #2', 2750, 55],\n [700, '', None, 20, '', None, 55], \n [700, 'MX', 11750, '', '', 2750, 55], \n [1200, 'MX', 52000, 10, 'vc funding strats', 75, 60]\n ], columns=[\n 'number_of_users',\n 'location',\n 'views',\n 'number_of_creators',\n 'name',\n 'losses',\n 'number_of_advertisers'\n ])\n cleaned_df = pd.DataFrame([\n [1000, 30000, 10, 100, 30],\n [500, 10000, 20, 3000, 20],\n [250, 7500, 25, 8000, 20],\n [1000, 45003, 20, 90, 40],\n [1500, 75000, 30, 70, 25],\n [1250, 60000, 50, 80, 20],\n [200, 5000, 30, 10000, 30],\n [800, 12050, 40, 2000, 45],\n [600, 11000, 50, 3000, 50],\n [700, 11750, 20, 2750, 55],\n [1200, 52000, 10, 75, 60]\n ], columns=[", "type": "random" } ]
[ " results = rule.evaluate()", " action_type='remove',", " axis='column',", " result = RemoveCollinearColumns(df, column_types, statistics).evaluate()", " rule.numeric_df.drop(column, axis=1, inplace=True)", " return super().setUp()", "", " def test_evaluate_non_numeric(self):", " self.rng = np.random.default_rng(42)", " def test_collinear_no_results(self):", " action_payload=dict(", " [800, 'US', 12050, 40, 'meme compilation', 2000, 45],", " 'number_of_advertisers'", " 'views': 'number',", " 'number_of_users'," ]
METASEP
20
mage-ai__mage-ai
mage-ai__mage-ai METASEP mage_ai/tests/data_cleaner/transformer_actions/test_variable_replacer.py METASEP
from data_cleaner.transformer_actions.variable_replacer import interpolate, replace_true_false
from tests.base_test import TestCase
from tests.data_cleaner.transformer_actions.shared import TEST_ACTION


class VariableReplacerTests(TestCase):
    def test_interpolate(self):
        text = TEST_ACTION['action_code']
        key1 = '1_1'
        variable_data1 = TEST_ACTION['action_variables'][key1]
        key2 = '1_2'
        variable_data2 = TEST_ACTION['action_variables'][key2]
        key3 = '1'
        variable_data3 = TEST_ACTION['action_variables'][key3]
        self.assertEqual(
            interpolate(
                interpolate(interpolate(text, key1, variable_data1), key2, variable_data2),
                key3,
                variable_data3,
            ),
            'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")',
        )

    def test_replace_true_false(self):
        action_code = 'a == false and b == true or (a == true and b == false) and ' \
            'a == False and b == True or a == "true" and b == "false" or ' \
            "a == 'false' and b == 'true' or a == 'True' and b == 'False'"
        result = 'a == False and b == True or (a == True and b == False) and ' \
            'a == False and b == True or a == "true" and b == "false" or ' \
            "a == 'false' and b == 'true' or a == 'True' and b == 'False'"
        self.assertEqual(replace_true_false(action_code), result)
mage_ai/tests/data_cleaner/transformer_actions/test_utils.py METASEP
from data_cleaner.transformer_actions.constants import ActionType, Axis
from data_cleaner.transformer_actions.utils import columns_to_remove
from tests.base_test import TestCase


class UtilsTests(TestCase):
    def test_columns_to_remove(self):
        transformer_actions = [
            dict(
                action_type=ActionType.FILTER,
                axis=Axis.COLUMN,
                action_arguments=['wand'],
            ),
            dict(
                action_type=ActionType.REMOVE,
                axis=Axis.ROW,
                action_arguments=['spear'],
            ),
            dict(
                action_type=ActionType.REMOVE,
                axis=Axis.COLUMN,
                action_arguments=['sword'],
            ),
        ]
        self.assertEqual(columns_to_remove(transformer_actions), ['sword'])
mage_ai/tests/data_cleaner/transformer_actions/test_row.py METASEP
from data_cleaner.transformer_actions.base import BaseAction
from data_cleaner.transformer_actions.row import (
    drop_duplicates,
    # explode,
    filter_rows,
    sort_rows,
)
from pandas.util.testing import assert_frame_equal
from tests.base_test import TestCase
import numpy as np
import pandas as pd


class RowTests(TestCase):
    def test_drop_duplicates(self):
        df = pd.DataFrame([
            [0, False, 'a'],
            [1, True, 'b'],
            [1, True, 'c'],
            [0, True, 'd'],
            [1, True, 'b'],
        ], columns=[
            'integer',
            'boolean',
            'string',
        ])
        test_cases = [
            (dict(action_arguments=['integer']), df.iloc[[3, 4]]),
            (dict(action_arguments=['integer'], action_options=dict(keep='first')), df.iloc[[0, 1]]),
            (dict(action_arguments=['boolean']), df.iloc[[0, 4]]),
            (dict(action_arguments=['boolean'], action_options=dict(keep='first')), df.iloc[[0, 1]]),
            (dict(action_arguments=['integer', 'boolean']), df.iloc[[0, 3, 4]]),
            (dict(action_arguments=[]), df.iloc[[0, 2, 3, 4]]),
        ]
        for action, val in test_cases:
            self.assertTrue(drop_duplicates(df, action).equals(val))

    # def test_explode(self):
    #     df = pd.DataFrame([
    #         ['(a, b, c)'],
    #         ['[b, c, d]'],
    #         [' e, f '],
    #     ], columns=['tags'])
    #     action = dict(
    #         action_arguments=['tags'],
    #         action_options={
    #             'separator': ',',
    #         },
    #         outputs=[
    #             dict(
    #                 uuid='tag',
    #                 column_type='text',
    #             ),
    #         ],
    #     )
    #     df_new = explode(df, action)
    #     df_expected = pd.DataFrame([
    #         ['a', '(a, b, c)'],
    #         ['b', '(a, b, c)'],
    #         ['c', '(a, b, c)'],
    #         ['b', '[b, c, d]'],
    #         ['c', '[b, c, d]'],
    #         ['d', '[b, c, d]'],
    #         ['e', ' e, f 
'], # ['f', ' e, f '], # ], columns=['tag', 'tags']) # assert_frame_equal(df_new.reset_index(drop=True), df_expected) def test_filter_rows(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ ([0, False, 'a'], 'integer == 0'), ([0, False, 'a'], 'string == \'a\''), ([1, True, 'b'], 'boolean == True'), ([1, True, 'b'], 'integer >= 1'), ([1, True, 'b'], 'integer >= 1 and boolean == True'), ([1, True, 'b'], 'integer >= 1 and (boolean == False or string == \'b\')'), ] for val, query in test_cases: self.assertEqual( val, filter_rows(df, dict(action_code=query)).iloc[0].values.tolist(), ) def test_filter_rows_is_null(self): df = pd.DataFrame([ [None, False, 'a'], [2, True, 'b'], [3, False, 'c'], [1, None, 'a'], [2, True, 'b'], [3, '', 'c'], [1, False, None], [2, True, 'b'], [3, False, ''], ], columns=[ 'integer', 'boolean', 'string', ]) integer_rows = filter_rows( df, dict(action_code='integer == null'), original_df=df, ).values.tolist() self.assertEqual(len(integer_rows), 1) self.assertEqual(integer_rows[0][1], False) self.assertEqual(integer_rows[0][2], 'a') boolean_rows = filter_rows( df, dict(action_code='boolean == null'), original_df=df, ).values.tolist() self.assertEqual(len(boolean_rows), 2) self.assertEqual(boolean_rows[0][0], 1.0) self.assertEqual(boolean_rows[0][1], None) self.assertEqual(boolean_rows[0][2], 'a') self.assertEqual(boolean_rows[1][0], 3.0) self.assertEqual(boolean_rows[1][1], '') self.assertEqual(boolean_rows[1][2], 'c') string_rows = filter_rows( df, dict(action_code='string == null'), original_df=df, ).values.tolist() self.assertEqual(len(string_rows), 2) self.assertEqual(string_rows[0][0], 1.0) self.assertEqual(string_rows[0][1], False) self.assertEqual(string_rows[0][2], None) self.assertEqual(string_rows[1][0], 3.0) self.assertEqual(string_rows[1][1], False) self.assertEqual(string_rows[1][2], '') def test_filter_rows_is_not_null(self): df = pd.DataFrame([ [None, False, 'a'], [2, True, 'b'], [3, False, 'c'], [1, None, 'a'], [2, True, 'b'], [3, '', 'c'], [1, False, None], [2, True, 'b'], [3, False, ''], ], columns=[ 'integer', 'boolean', 'string', ]) integer_rows = filter_rows( df, dict(action_code='integer != null'), original_df=df, )['integer'].values.tolist() self.assertEqual(integer_rows, [ 2, 3, 1, 2, 3, 1, 2, 3, ]) boolean_rows = filter_rows( df, dict(action_code='boolean != null'), original_df=df, )['boolean'].values.tolist() self.assertEqual(boolean_rows, [ False, True, False, True, False, True, False, ]) string_rows = filter_rows( df, dict(action_code='string != null'), original_df=df, )['string'].values.tolist() self.assertEqual(string_rows, [ 'a', 'b', 'c', 'a', 'b', 'c', 'b', ]) def test_filter_row_contains_string(self): df = pd.DataFrame([ ['fsdijfosidjfiosfj'], ['[email protected]'], [np.NaN], ['fsdfsdfdsfdsf'], ['[email protected]'], ], columns=[ 'id', ]) action = dict( action_code='id contains @', ) action2 = dict( action_code='id contains \'@\'', ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_expected = pd.DataFrame([ ['[email protected]'], ['[email protected]'], ], columns=[ 'id', ]) assert_frame_equal(df_new, df_expected) assert_frame_equal(df_new2, df_expected) def test_filter_row_not_contains_string(self): df = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['[email protected]', False], ['eeeeasdf', True] ], 
columns=[ 'email', 'subscription' ]) action = dict( action_code='email not contains mailnet', ) action2 = dict( action_code='email not contains \'mailnet\'', ) action3 = dict( action_code = 'email not contains @', ) action4 = dict( action_code = 'email not contains \'^e+\w\'', ) action_invalid = dict( action_code='subscription not contains False' ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True) df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True) df_expected1 = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['eeeeasdf', True] ], columns=[ 'email', 'subscription' ]) df_expected2 = pd.DataFrame([ [np.NaN, False], ['fsdfsdfdsfdsf', False], ['eeeeasdf', True] ], columns=[ 'email', 'subscription' ]) df_expected3 = pd.DataFrame([ [np.NaN, False], ['[email protected]', True], ['[email protected]', True], ['fsdfsdfdsfdsf', False], ['[email protected]', False] ], columns=[ 'email', 'subscription' ]) assert_frame_equal(df_new, df_expected1) assert_frame_equal(df_new2, df_expected1) assert_frame_equal(df_new3, df_expected2) assert_frame_equal(df_new4, df_expected3) with self.assertRaises(Exception): _ = filter_rows(df, action_invalid, original_df=df).reset_index(drop=True) def test_filter_rows_multi_condition(self): df = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) action = dict(action_code='(value < 110 and value >= 50) and (value != null)') action2 = dict(action_code='brand contains brand and inventory != null') action3 = dict(action_code='(brand != null and value > 60) or (discounted == null)') action4 = dict( action_code='(discounted == True and inventory > 15)' ' or (discounted == False and value != null)' ) action5 = dict( action_code='(brand not contains company and value == 75 and inventory <= 80)' ' or (discounted != null)' ) df_expected = pd.DataFrame( [ [100, None, '', 10], [50, 'brand1', True, 13], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected2 = pd.DataFrame( [ [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected3 = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected4 = pd.DataFrame( [ [250, 'brand1', False, np.NaN], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_expected5 = pd.DataFrame( [ [250, 'brand1', False, np.NaN], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_new = filter_rows(df, action, original_df=df).reset_index(drop=True) df_new2 = filter_rows(df, action2, original_df=df).reset_index(drop=True) df_new3 = filter_rows(df, action3, original_df=df).reset_index(drop=True) df_new4 = filter_rows(df, action4, original_df=df).reset_index(drop=True) df_new5 = filter_rows(df, action5, original_df=df).reset_index(drop=True) df_new['value'] = df_new['value'].astype(int) df_new['inventory'] = df_new['inventory'].astype(int) df_new2['brand'] = df_new2['brand'].astype(str) df_new2['inventory'] = 
df_new2['inventory'].astype(int) df_new4['value'] = df_new4['value'].astype(int) df_new4['brand'] = df_new4['brand'].astype(str) df_new4['discounted'] = df_new4['discounted'].astype(bool) assert_frame_equal(df_expected, df_new) assert_frame_equal(df_expected2, df_new2) assert_frame_equal(df_expected3, df_new3) assert_frame_equal(df_expected4, df_new4) assert_frame_equal(df_expected5, df_new5) def test_filter_row_implicit_null(self): # tests that implicit null values in the transformed dataframe are still removed df = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [np.NaN, 'brand2', None, 18], [50, 'brand1', True, 13], [75, '', '', 80], [None, 'company3', False, 23], ], columns=['value', 'brand', 'discounted', 'inventory'] ) action_payload = { 'action_type': 'filter', 'action_code': '%{1} != null', 'action_arguments': [], 'action_options': {}, 'axis': 'row', 'action_variables': { '1': { 'id': 'value', 'type': 'feature', 'feature': { 'column_type': 'number', 'uuid': 'value' } }, }, 'outputs': [] } action = BaseAction(action_payload) df_new = action.execute(df).reset_index(drop=True) df_expected = pd.DataFrame( [ [100, None, '', 10], [250, 'brand1', False, np.NaN], [50, 'brand1', True, 13], [75, '', '', 80], ], columns=['value', 'brand', 'discounted', 'inventory'] ) df_new['value'] = df_new['value'].astype(int) assert_frame_equal(df_expected, df_new) def test_original_df_column_name_padding(self): # tests edge cases for when columns with the special prefixes "orig_" and "tf_" are given as input df = pd.DataFrame([ [0,1, None], [1,2, np.NaN], [np.NaN, 3, 4], [3, None, 5] ], columns=[ 'col', 'orig_col', 'tf_col' ]) df_expected = pd.DataFrame([ [0,1, None], [1,2, np.NaN], ], columns=[ 'col', 'orig_col', 'tf_col' ]) action = dict(action_code='(col != null) and (orig_col != null)') df_new = filter_rows(df, action, original_df = df) df_new['col'] = df_new['col'].astype(int) df_new['orig_col'] = df_new['orig_col'].astype(int) assert_frame_equal(df_new, df_expected) def test_sort_rows(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], [1, True, 'c'], [0, True, 'd'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ (dict(action_arguments=['integer']), df.iloc[[0, 3, 1, 2]]), (dict(action_arguments=['integer'], action_options=dict(ascending=False)), df.iloc[[1, 2, 0, 3]]), (dict(action_arguments=['string']), df.iloc[[0, 1, 2, 3]]), (dict(action_arguments=['string'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]), ] for action, val in test_cases: self.assertTrue(sort_rows(df, action).equals(val)) def test_sort_rows_with_multiple_columns(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], [1, True, 'c'], [0, True, 'd'], ], columns=[ 'integer', 'boolean', 'string', ]) test_cases = [ (dict(action_arguments=['integer', 'string']), df.iloc[[0, 3, 1, 2]]), (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[False, False])), df.iloc[[2, 1, 3, 0]]), (dict(action_arguments=['integer', 'string'], action_options=dict(ascendings=[True, False])), df.iloc[[3, 0, 2, 1]]), (dict(action_arguments=['string', 'integer'], action_options=dict(ascending=False)), df.iloc[[3, 2, 1, 0]]), ] for action, val in test_cases: self.assertTrue(sort_rows(df, action).equals(val)) def test_sort_rows_with_number_and_empty_strings(self): df = pd.DataFrame([ [0], [None], [3], [''], [1], [2], ], columns=[ 'integer', ]) test_cases = [ (dict(ascending=True), df.iloc[[1, 3, 0, 4, 5, 2]]), (dict(ascending=False), df.iloc[[2, 5, 4, 0, 1, 3]]), ] for 
action_options, val in test_cases: action = dict( action_arguments=['integer'], action_variables={ '1': dict( feature=dict( column_type='number', uuid='integer', ), ), }, action_options=action_options, ) self.assertTrue(sort_rows(df, action).equals(val)) mage_ai/tests/data_cleaner/transformer_actions/test_helpers.py METASEP from data_cleaner.transformer_actions.helpers import extract_join_feature_set_version_id from tests.base_test import TestCase class ColumnTests(TestCase): def test_extract_join_feature_set_version_id(self): payload1 = dict( action_type='join', action_arguments=[100], action_options=dict( left_on=['user_id'], right_on=['id'], ), ) payload2 = dict( action_type='join', action_arguments=['%{1}'], action_options=dict( left_on=['user_id'], right_on=['id'], ), action_variables={ '1': { 'id': 200, 'type': 'feature_set_version', }, }, ) payload3 = dict( action_type='filter', ) fsv_id1 = extract_join_feature_set_version_id(payload1) fsv_id2 = extract_join_feature_set_version_id(payload2) fsv_id3 = extract_join_feature_set_version_id(payload3) self.assertEqual(fsv_id1, 100) self.assertEqual(fsv_id2, 200) self.assertEqual(fsv_id3, None) mage_ai/tests/data_cleaner/transformer_actions/test_column.py METASEP from data_cleaner.transformer_actions.column import ( add_column, count, count_distinct, clean_column_name, diff, # expand_column, first, last, remove_column, select, shift_down, shift_up, ) from pandas.util.testing import assert_frame_equal from tests.base_test import TestCase import numpy as np import pandas as pd TEST_DATAFRAME = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'amount', ]) class ColumnTests(TestCase): def test_remove_column(self): df = pd.DataFrame([ [0, False, 'a'], [1, True, 'b'], ], columns=[ 'integer', 'boolean', 'string', ]) action = dict(action_arguments=['string']) df_new = remove_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( integer=0, boolean=False, ), dict( integer=1, boolean=True, ), ]) action = dict(action_arguments=['integer', 'boolean']) df_new = remove_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( string='a', ), dict( string='b', ), ]) def test_add_column_addition(self): df = pd.DataFrame([ [1, 3, 7, 9], [4, 2, 9, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', 'integer3', ], action_options={ 'udf': 'addition', 'value': None, }, outputs=[ dict( uuid='integer_addition', column_type='number', ), ], ) action2 = dict( action_arguments=['integer1'], action_options={ 'udf': 'addition', 'value': 10, }, outputs=[ dict( uuid='integer_addition2', column_type='number', ), ], ) action3 = dict( action_arguments=['integer1', 'integer4'], action_options={ 'udf': 'addition', 'value': 10, }, outputs=[ dict( uuid='integer_addition3', column_type='number', ), ], ) df_new = add_column( add_column( add_column(df, action1), action2, ), action3, ) df_expected = pd.DataFrame([ [1, 3, 7, 9, 11, 11, 20], [4, 2, 9, 3, 15, 14, 17], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_addition', 'integer_addition2', 'integer_addition3', ]) assert_frame_equal(df_new, df_expected) def test_add_column_addition_days(self): df = pd.DataFrame([ ['2021-08-31'], ['2021-08-28'], ], columns=[ 'created_at', ]) action = dict( action_arguments=['created_at'], action_options=dict( column_type='datetime', time_unit='d', udf='addition', value=3, ), outputs=[ dict( uuid='3d_after_creation', 
column_type='text', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['2021-08-31', '2021-09-03 00:00:00'], ['2021-08-28', '2021-08-31 00:00:00'], ], columns=[ 'created_at', '3d_after_creation' ]) assert_frame_equal(df_new, df_expected) def test_add_column_constant(self): df = pd.DataFrame([ [False], [True], ], columns=[ 'boolean', ]) action = dict( action_arguments=[10], action_options=dict( udf='constant', ), outputs=[ dict( uuid='integer', column_type='number', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( boolean=False, integer=10, ), dict( boolean=True, integer=10, ), ]) def test_add_column_date_trunc(self): df = pd.DataFrame([ ['2021-08-31', False], ['2021-08-28', True], ], columns=[ 'created_at', 'boolean', ]) action = dict( action_arguments=['created_at'], action_options=dict( udf='date_trunc', date_part='week', ), outputs=[ dict( uuid='week_date', column_type='text', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( created_at='2021-08-31', boolean=False, week_date='2021-08-30', ), dict( created_at='2021-08-28', boolean=True, week_date='2021-08-23', ), ]) def test_add_column_difference(self): df = pd.DataFrame([ [1, 3], [4, 2], ], columns=[ 'integer1', 'integer2', ]) action1 = dict( action_arguments=['integer1', 'integer2'], action_options={ 'udf': 'difference', }, outputs=[ dict( uuid='integer_difference', column_type='number', ), ], ) action2 = dict( action_arguments=['integer1'], action_options={ 'udf': 'difference', 'value': 10, }, outputs=[ dict( uuid='integer_difference2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [1, 3, -2, -9], [4, 2, 2, -6], ], columns=[ 'integer1', 'integer2', 'integer_difference', 'integer_difference2' ]) assert_frame_equal(df_new, df_expected) def test_add_column_difference_days(self): df = pd.DataFrame([ ['2021-08-31', '2021-09-14'], ['2021-08-28', '2021-09-03'], ], columns=[ 'created_at', 'converted_at', ]) action = dict( action_arguments=['converted_at', 'created_at'], action_options=dict( column_type='datetime', time_unit='d', udf='difference', ), outputs=[ dict( uuid='days_diff', column_type='number', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['2021-08-31', '2021-09-14', 14], ['2021-08-28', '2021-09-03', 6], ], columns=[ 'created_at', 'converted_at', 'days_diff', ]) assert_frame_equal(df_new, df_expected) def test_add_column_distance_between(self): df = pd.DataFrame([ [26.05308, -97.31838, 33.41939, -112.32606], [39.71954, -84.13056, 33.41939, -112.32606], ], columns=[ 'lat1', 'lng1', 'lat2', 'lng2', ]) action = dict( action_arguments=['lat1', 'lng1', 'lat2', 'lng2'], action_options=dict( udf='distance_between', ), outputs=[ dict( uuid='distance', column_type='number_with_decimals', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( lat1=26.05308, lng1=-97.31838, lat2=33.41939, lng2=-112.32606, distance=1661.8978520305657, ), dict( lat1=39.71954, lng1=-84.13056, lat2=33.41939, lng2=-112.32606, distance=2601.5452571116184, ), ]) def test_add_column_divide(self): df = pd.DataFrame([ [12, 3, 70, 9], [4, 2, 90, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', ], action_options={ 'udf': 'divide', }, outputs=[ dict( uuid='integer_divide', column_type='number', ), ], ) action2 = dict( action_arguments=['integer3'], 
action_options={ 'udf': 'divide', 'value': 10, }, outputs=[ dict( uuid='integer_divide2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [12, 3, 70, 9, 4, 7], [4, 2, 90, 3, 2, 9], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_divide', 'integer_divide2' ]) assert_frame_equal(df_new, df_expected) # def test_add_column_extract_dict_string(self): # df = pd.DataFrame([ # '{\'country\': \'US\', \'age\': \'20\'}', # '{\'country\': \'CA\'}', # '{\'country\': \'UK\', \'age\': \'24\'}', # '', # ], columns=[ # 'properties', # ]) # action = dict( # action_arguments=['properties', 'country'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_country', # column_type='text', # ), # ], # ) # df_new = add_column(df, action) # self.assertEqual(df_new.to_dict(orient='records'), [ # dict( # properties='{\'country\': \'US\', \'age\': \'20\'}', # property_country='US', # ), # dict( # properties='{\'country\': \'CA\'}', # property_country='CA', # ), # dict( # properties='{\'country\': \'UK\', \'age\': \'24\'}', # property_country='UK', # ), # dict( # properties='', # property_country=np.NaN, # ), # ]) # action2 = dict( # action_arguments=['properties', 'age'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_age', # column_type='number', # ), # ], # ) # df_new2 = add_column(df, action2) # self.assertEqual(df_new2.to_dict(orient='records'), [ # dict( # properties='{\'country\': \'US\', \'age\': \'20\'}', # property_age=20, # ), # dict( # properties='{\'country\': \'CA\'}', # property_age=0, # ), # dict( # properties='{\'country\': \'UK\', \'age\': \'24\'}', # property_age=24, # ), # dict( # properties='', # property_age=0, # ), # ]) # def test_add_column_extract_dict_string_with_json(self): # df = pd.DataFrame([ # '{\"country\": \"US\", \"is_adult\": true}', # '{\"country\": \"CA\"}', # '{\"country\": \"UK\", \"is_adult\": false}', # '', # ], columns=[ # 'properties', # ]) # action = dict( # action_arguments=['properties', 'country'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_country', # column_type='text', # ), # ], # ) # df_new = add_column(df, action) # self.assertEqual(df_new.to_dict(orient='records'), [ # dict( # properties='{\"country\": \"US\", \"is_adult\": true}', # property_country='US', # ), # dict( # properties='{\"country\": \"CA\"}', # property_country='CA', # ), # dict( # properties='{\"country\": \"UK\", \"is_adult\": false}', # property_country='UK', # ), # dict( # properties='', # property_country=np.NaN, # ), # ]) # action2 = dict( # action_arguments=['properties', 'is_adult'], # action_options=dict( # udf='extract_dict_value', # ), # outputs=[ # dict( # uuid='property_is_adult', # column_type='true_or_false', # ), # ], # ) # df_new2 = add_column(df, action2) # self.assertEqual(df_new2.to_dict(orient='records'), [ # dict( # properties='{\"country\": \"US\", \"is_adult\": true}', # property_is_adult=True, # ), # dict( # properties='{\"country\": \"CA\"}', # property_is_adult=None, # ), # dict( # properties='{\"country\": \"UK\", \"is_adult\": false}', # property_is_adult=False, # ), # dict( # properties='', # property_is_adult=None, # ), # ]) def test_add_column_formatted_date(self): df = pd.DataFrame([ ['2019-04-10 08:20:58', False], ['2019-03-05 03:30:30', True], ], columns=[ 'created_at', 'boolean', ]) action = dict( action_arguments=['created_at'], 
action_options=dict( udf='formatted_date', format='%Y-%m-%d', ), outputs=[ dict( uuid='created_date', column_type='text', ), ], ) df_new = add_column(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( created_at='2019-04-10 08:20:58', boolean=False, created_date='2019-04-10', ), dict( created_at='2019-03-05 03:30:30', boolean=True, created_date='2019-03-05', ), ]) def test_add_column_if_else(self): df = pd.DataFrame([ ['2019-04-10 08:20:58'], [None], ], columns=[ 'converted_at' ]) action = dict( action_arguments=[False, True], action_code='converted_at == null', action_options=dict( udf='if_else', ), outputs=[ dict( uuid='converted', column_type='true_or_false', ), ], ) df_new = add_column(df, action, original_df=df) self.assertEqual(df_new.to_dict(orient='records'), [ dict( converted_at='2019-04-10 08:20:58', converted=True, ), dict( converted_at=None, converted=False, ), ]) def test_add_column_if_else_with_column(self): df = pd.DataFrame([ ['2019-04-10 08:20:58', 'test_user_id'], [None, None], ], columns=[ 'converted_at', 'user_id', ]) action = dict( action_arguments=['unknown', 'user_id'], action_code='converted_at == null', action_options=dict( udf='if_else', arg1_type='value', arg2_type='column', ), outputs=[ dict( uuid='user_id_clean', column_type='text', ), ], ) df_new = add_column(df, action, original_df=df) self.assertEqual(df_new.to_dict(orient='records'), [ dict( converted_at='2019-04-10 08:20:58', user_id='test_user_id', user_id_clean='test_user_id', ), dict( converted_at=None, user_id=None, user_id_clean='unknown', ), ]) def test_add_column_multiply(self): df = pd.DataFrame([ [1, 3, 7, 9], [4, 2, 9, 3], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', ]) action1 = dict( action_arguments=[ 'integer1', 'integer2', ], action_options={ 'udf': 'multiply', }, outputs=[ dict( uuid='integer_multiply', column_type='number', ), ], ) action2 = dict( action_arguments=['integer3'], action_options={ 'udf': 'multiply', 'value': 10, }, outputs=[ dict( uuid='integer_multiply2', column_type='number', ), ], ) df_new = add_column(add_column(df, action1), action2) df_expected = pd.DataFrame([ [1, 3, 7, 9, 3, 70], [4, 2, 9, 3, 8, 90], ], columns=[ 'integer1', 'integer2', 'integer3', 'integer4', 'integer_multiply', 'integer_multiply2' ]) assert_frame_equal(df_new, df_expected) def test_add_column_string_replace(self): df = pd.DataFrame([ ['$1000'], ['$321. '], ['$4,321'], ], columns=[ 'amount', ]) action = dict( action_arguments=['amount'], action_options={ 'udf': 'string_replace', 'pattern': '\\$|\\.|\\,|\\s*', 'replacement': '', }, outputs=[ dict( uuid='amount_clean', column_type='true_or_false', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['$1000', '1000'], ['$321. 
', '321'], ['$4,321', '4321'], ], columns=[ 'amount', 'amount_clean', ]) assert_frame_equal(df_new, df_expected) def test_add_column_string_split(self): df = pd.DataFrame([ ['Street1, Long Beach, CA, '], ['Street2,Vernon, CA, 123'], ['Pacific Coast Highway, Los Angeles, CA, 111'], ], columns=[ 'location', ]) action = dict( action_arguments=['location'], action_options={ 'udf': 'string_split', 'separator': ',', 'part_index': 1, }, outputs=[ dict( uuid='location_city', column_type='text', ), ], ) action2 = dict( action_arguments=['location'], action_options={ 'udf': 'string_split', 'separator': ',', 'part_index': 3, }, outputs=[ dict( uuid='num', column_type='number', ), ], ) df_new = add_column(add_column(df, action), action2) df_expected = pd.DataFrame([ ['Street1, Long Beach, CA, ', 'Long Beach', 0], ['Street2,Vernon, CA, 123', 'Vernon', 123], ['Pacific Coast Highway, Los Angeles, CA, 111', 'Los Angeles', 111], ], columns=[ 'location', 'location_city', 'num', ]) assert_frame_equal(df_new, df_expected) def test_add_column_substring(self): df = pd.DataFrame([ ['$1000.0'], ['$321.9'], ], columns=[ 'amount', ]) action = dict( action_arguments=['amount'], action_options={ 'udf': 'substring', 'start': 1, 'stop': -2, }, outputs=[ dict( uuid='amount_int', column_type='text', ), ], ) df_new = add_column(df, action) df_expected = pd.DataFrame([ ['$1000.0', '1000'], ['$321.9', '321'], ], columns=[ 'amount', 'amount_int', ]) assert_frame_equal(df_new, df_expected) def test_average(self): from data_cleaner.transformer_actions.column import average action = self.__groupby_agg_action('average_amount') df_new = average(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1050], [2, 1050, 1100], [1, 1100, 1050], [2, 1150, 1100], ], columns=[ 'group_id', 'amount', 'average_amount' ]) assert_frame_equal(df_new, df_expected) def test_count(self): df = pd.DataFrame([ [1, 1000], [1, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, order_count=3, ), dict( group_id=1, order_id=1050, order_count=3, ), dict( group_id=1, order_id=1100, order_count=3, ), dict( group_id=2, order_id=1150, order_count=1, ), ]) def test_count_distinct(self): df = pd.DataFrame([ [1, 1000], [1, 1000], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='order_count'), ], ) df_new = count_distinct(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, order_count=2, ), dict( group_id=1, order_id=1000, order_count=2, ), dict( group_id=1, order_id=1100, order_count=2, ), dict( group_id=2, order_id=1150, order_count=1, ), ]) def test_count_with_time_window(self): df = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01'], [1, 1050, '2021-10-01', '2021-08-01'], [1, 1100, '2021-10-01', '2021-01-01'], [2, 1150, '2021-09-01', '2021-08-01'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) action = dict( action_arguments=['order_id'], action_code='', action_options=dict( groupby_columns=['group_id'], timestamp_feature_a='group_churned_at', timestamp_feature_b='order_created_at', window=90*24*3600, ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, 
action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, group_churned_at='2021-10-01', order_created_at='2021-09-01', order_count=2, ), dict( group_id=1, order_id=1050, group_churned_at='2021-10-01', order_created_at='2021-08-01', order_count=2, ), dict( group_id=1, order_id=1100, group_churned_at='2021-10-01', order_created_at='2021-01-01', order_count=2, ), dict( group_id=2, order_id=1150, group_churned_at='2021-09-01', order_created_at='2021-08-01', order_count=1, ), ]) def test_count_with_filter(self): df = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01'], [1, 1050, '2021-10-01', '2021-08-01'], [1, 1100, '2021-10-01', '2021-01-01'], [2, 1150, '2021-09-01', '2021-08-01'], [2, 1200, '2021-09-01', '2021-08-16'], [2, 1250, '2021-09-01', '2021-08-14'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) action = dict( action_arguments=['order_id'], action_code='order_created_at < \'2021-08-15\'', action_options=dict( groupby_columns=['group_id'], ), outputs=[ dict(uuid='order_count'), ], ) df_new = count(df, action) df_expected = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01', 2], [1, 1050, '2021-10-01', '2021-08-01', 2], [1, 1100, '2021-10-01', '2021-01-01', 2], [2, 1150, '2021-09-01', '2021-08-01', 2], [2, 1200, '2021-09-01', '2021-08-16', 2], [2, 1250, '2021-09-01', '2021-08-14', 2], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', 'order_count', ]) assert_frame_equal(df_new, df_expected) def test_clean_column_name(self): df = pd.DataFrame([ ['', '', '', '', '', '', '', '', ''] ], columns=[ 'good_name', 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 23', 'true', 'true_crime', '@#f$%&*o$*(%^&r*$%&' ] ) expected_df = pd.DataFrame([ ['', '', '', '', '', '', '', '', ''] ], columns=[ 'good_name', 'bad_case', 'number_34234342', 'yield_', 'number_12342', '1234___23', 'true_', 'true_crime', 'for_' ] ) action = dict( action_type='clean_column_name', action_arguments=[ 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 
23', 'true', '@#f$%&*o$*(%^&r*$%&' ], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ) new_df = clean_column_name(df, action) assert_frame_equal(new_df, expected_df) def test_diff(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='sold_diff'), ], ) df_new = diff(df, action) self.assertEqual(df_new.to_dict(orient='records')[1:], [ dict( date='2020-01-02', sold=1050, sold_diff=50, ), dict( date='2020-01-03', sold=1200, sold_diff=150, ), dict( date='2020-01-04', sold=990, sold_diff=-210, ), ]) # def test_expand_column(self): # df = pd.DataFrame([ # [1, 'game'], # [1, 'book'], # [1, 'game'], # [2, 'Video Game'], # [1, 'Video Game'], # [2, 'book'], # [1, 'Video Game'], # [2, 'Video Game'], # ], columns=[ # 'group_id', # 'category', # ]) # action = dict( # action_arguments=['category'], # action_options=dict( # groupby_columns=['group_id'] # ), # outputs=[ # dict(uuid='category_expanded_count_game'), # dict(uuid='category_expanded_count_book'), # dict(uuid='category_expanded_count_video_game'), # dict(uuid='category_expanded_count_clothing'), # ], # ) # df_new = expand_column(df, action) # df_expected = pd.DataFrame([ # [1, 'game', 2, 1, 2], # [1, 'book', 2, 1, 2], # [1, 'game', 2, 1, 2], # [2, 'Video Game', 0, 1, 2], # [1, 'Video Game', 2, 1, 2], # [2, 'book', 0, 1, 2], # [1, 'Video Game', 2, 1, 2], # [2, 'Video Game', 0, 1, 2], # ], columns=[ # 'group_id', # 'category', # 'category_expanded_count_game', # 'category_expanded_count_book', # 'category_expanded_count_video_game', # ]) # assert_frame_equal(df_new, df_expected) # def test_expand_column_with_time_window(self): # df = pd.DataFrame([ # [1, 'game', '2021-01-02', '2021-01-04'], # [1, 'book', '2021-01-02', '2021-01-04'], # [1, 'game', '2021-01-03', '2021-01-04'], # [2, 'Video Game', '2021-01-01', '2021-01-03'], # [1, 'Video Game', '2021-01-01', '2021-01-04'], # [2, 'book', '2021-01-02', '2021-01-03'], # [1, 'Video Game', '2021-01-03', '2021-01-04'], # [2, 'Video Game', '2020-12-30', '2021-01-03'], # ], columns=[ # 'group_id', # 'category', # 'timestamp1', # 'timestamp2', # ]) # action = dict( # action_arguments=['category'], # action_options=dict( # groupby_columns=['group_id'], # timestamp_feature_a='timestamp2', # timestamp_feature_b='timestamp1', # window=172800, # ), # outputs=[ # dict(uuid='category_expanded_count_game_2d'), # dict(uuid='category_expanded_count_book_2d'), # dict(uuid='category_expanded_count_video_game_2d'), # dict(uuid='category_expanded_count_clothing_2d'), # ], # ) # df_new = expand_column(df, action) # df_expected = pd.DataFrame([ # [1, 'game', '2021-01-02', '2021-01-04', 2, 1, 1], # [1, 'book', '2021-01-02', '2021-01-04', 2, 1, 1], # [1, 'game', '2021-01-03', '2021-01-04', 2, 1, 1], # [2, 'Video Game', '2021-01-01', '2021-01-03', 0, 1, 1], # [1, 'Video Game', '2021-01-01', '2021-01-04', 2, 1, 1], # [2, 'book', '2021-01-02', '2021-01-03', 0, 1, 1], # [1, 'Video Game', '2021-01-03', '2021-01-04', 2, 1, 1], # [2, 'Video Game', '2020-12-30', '2021-01-03', 0, 1, 1], # ], columns=[ # 'group_id', # 'category', # 'timestamp1', # 'timestamp2', # 'category_expanded_count_game_2d', # 'category_expanded_count_book_2d', # 'category_expanded_count_video_game_2d', # ]) # assert_frame_equal(df_new, df_expected) def test_first_column(self): df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 
'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='first_order'), ], ) df_new = first(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, first_order=1000, ), dict( group_id=2, order_id=1050, first_order=1050, ), dict( group_id=1, order_id=1100, first_order=1000, ), dict( group_id=2, order_id=1150, first_order=1050, ), ]) def test_impute(self): from data_cleaner.transformer_actions.column import impute df = pd.DataFrame([ ['2020-01-01', 1000, ' ', 800], ['2020-01-02', '', 1200, 700], ['2020-01-03', 1200, np.NaN, 900], ['2020-01-04', np.NaN, ' ', 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) action1 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'value': '0', }, action_variables={ '0': { 'feature': { 'column_type': 'number', 'uuid': 'sold', }, 'type': 'feature', }, '1': { 'feature': { 'column_type': 'number', 'uuid': 'curr_profit', }, 'type': 'feature', }, }, ) action2 = dict( action_arguments=['sold'], action_options={ 'value': '0', }, action_variables={ '0': { 'feature': { 'column_type': 'number', 'uuid': 'sold', }, 'type': 'feature', }, }, ) action3 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'average', }, ) action4 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'median', }, ) action5 = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'column', 'value': 'prev_sold', }, ) action_invalid = dict( action_arguments=['sold', 'curr_profit'], action_options={ 'strategy': 'mode', }, ) df_new1 = impute(df.copy(), action1) df_new2 = impute(df.copy(), action2) df_new3 = impute(df.copy(), action3) df_new4 = impute(df.copy(), action4) df_new5 = impute(df.copy(), action5) df_expected1 = pd.DataFrame([ ['2020-01-01', 1000, 0, 800], ['2020-01-02', 0, 1200, 700], ['2020-01-03', 1200, 0, 900], ['2020-01-04', 0, 0, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected2 = pd.DataFrame([ ['2020-01-01', 1000, ' ', 800], ['2020-01-02', 0, 1200, 700], ['2020-01-03', 1200, np.nan, 900], ['2020-01-04', 0, ' ', 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected3 = pd.DataFrame([ ['2020-01-01', 1000, 1250, 800], ['2020-01-02', 1300, 1200, 700], ['2020-01-03', 1200, 1250, 900], ['2020-01-04', 1300, 1250, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected4 = pd.DataFrame([ ['2020-01-01', 1000, 1250, 800], ['2020-01-02', 1200, 1200, 700], ['2020-01-03', 1200, 1250, 900], ['2020-01-04', 1200, 1250, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_expected5 = pd.DataFrame([ ['2020-01-01', 1000, 800, 800], ['2020-01-02', 700, 1200, 700], ['2020-01-03', 1200, 900, 900], ['2020-01-04', 700, 700, 700], ['2020-01-05', 1700, 1300, 800], ], columns=[ 'date', 'sold', 'curr_profit', 'prev_sold', ]) df_new1['sold'] = df_new1['sold'].astype(int) df_new1['curr_profit'] = df_new1['curr_profit'].astype(int) df_new2['sold'] = df_new2['sold'].astype(int) df_new3['sold'] = df_new3['sold'].astype(int) df_new3['curr_profit'] = df_new3['curr_profit'].astype(int) df_new4['sold'] = df_new4['sold'].astype(int) df_new4['curr_profit'] = df_new4['curr_profit'].astype(int) df_new5['sold'] = df_new5['sold'].astype(int) 
df_new5['curr_profit'] = df_new5['curr_profit'].astype(int) assert_frame_equal(df_new1, df_expected1) assert_frame_equal(df_new2, df_expected2) assert_frame_equal(df_new3, df_expected3) assert_frame_equal(df_new4, df_expected4) assert_frame_equal(df_new5, df_expected5) with self.assertRaises(Exception): _ = impute(df.copy(), action_invalid) def test_last_column(self): df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1150], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['order_id'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid='last_order'), ], ) df_new = last(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, order_id=1000, last_order=1100, ), dict( group_id=2, order_id=1050, last_order=1150, ), dict( group_id=1, order_id=1100, last_order=1100, ), dict( group_id=2, order_id=1150, last_order=1150, ), ]) def test_max(self): from data_cleaner.transformer_actions.column import max action = self.__groupby_agg_action('max_amount') df_new = max(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1100], [2, 1050, 1150], [1, 1100, 1100], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'max_amount', ]) assert_frame_equal(df_new, df_expected) action2 = dict( action_arguments=['amount'], action_options=dict(), outputs=[ dict(uuid='max_amount'), ], ) df_new2 = max(TEST_DATAFRAME.copy(), action2) df_expected2 = pd.DataFrame([ [1, 1000, 1150], [2, 1050, 1150], [1, 1100, 1150], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'max_amount', ]) assert_frame_equal(df_new2, df_expected2) def test_median(self): from data_cleaner.transformer_actions.column import median action = self.__groupby_agg_action('median_amount') df = pd.DataFrame([ [1, 1000], [2, 1050], [1, 1100], [2, 1550], [2, 1150], ], columns=[ 'group_id', 'amount', ]) df_new = median(df, action) df_expected = pd.DataFrame([ [1, 1000, 1050], [2, 1050, 1150], [1, 1100, 1050], [2, 1550, 1150], [2, 1150, 1150], ], columns=[ 'group_id', 'amount', 'median_amount', ]) assert_frame_equal(df_new, df_expected) def test_min(self): from data_cleaner.transformer_actions.column import min action = self.__groupby_agg_action('min_amount') df_new = min(TEST_DATAFRAME.copy(), action) df_expected = pd.DataFrame([ [1, 1000, 1000], [2, 1050, 1050], [1, 1100, 1000], [2, 1150, 1050], ], columns=[ 'group_id', 'amount', 'min_amount', ]) assert_frame_equal(df_new, df_expected) def test_select(self): df = pd.DataFrame([ [1, 1000], [2, 1050], ], columns=[ 'group_id', 'order_id', ]) action = dict( action_arguments=['group_id'] ) df_new = select(df, action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, ), dict( group_id=2, ), ]) def test_shift_down(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='prev_sold'), ], ) df_new = shift_down(df, action) self.assertEqual(df_new.to_dict(orient='records')[1:], [ dict( date='2020-01-02', sold=1050, prev_sold=1000, ), dict( date='2020-01-03', sold=1200, prev_sold=1050, ), dict( date='2020-01-04', sold=990, prev_sold=1200, ), ]) def test_shift_down_with_groupby(self): df = pd.DataFrame([ [1, '2020-01-01', 1000], [1, '2020-01-02', 1050], [2, '2020-01-03', 1200], [1, '2020-01-04', 990], [2, '2020-01-05', 980], [2, '2020-01-06', 970], [2, '2020-01-07', 960], ], columns=[ 'group_id', 'date', 'sold', ]) action = dict( action_arguments=['sold'], 
action_options=dict( groupby_columns=['group_id'], periods=2, ), outputs=[ dict(uuid='prev_sold'), ], ) df_new = shift_down(df, action) df_expected = pd.DataFrame([ [1, '2020-01-01', 1000, None], [1, '2020-01-02', 1050, None], [2, '2020-01-03', 1200, None], [1, '2020-01-04', 990, 1000], [2, '2020-01-05', 980, None], [2, '2020-01-06', 970, 1200], [2, '2020-01-07', 960, 980], ], columns=[ 'group_id', 'date', 'sold', 'prev_sold', ]) assert_frame_equal(df_new, df_expected) def test_shift_up(self): df = pd.DataFrame([ ['2020-01-01', 1000], ['2020-01-02', 1050], ['2020-01-03', 1200], ['2020-01-04', 990], ], columns=[ 'date', 'sold', ]) action = dict( action_arguments=['sold'], outputs=[ dict(uuid='next_sold'), ], ) df_new = shift_up(df, action) self.assertEqual(df_new.to_dict(orient='records')[:-1], [ dict( date='2020-01-01', sold=1000, next_sold=1050, ), dict( date='2020-01-02', sold=1050, next_sold=1200, ), dict( date='2020-01-03', sold=1200, next_sold=990, ), ]) def test_sum(self): from data_cleaner.transformer_actions.column import sum action = self.__groupby_agg_action('total_amount') df_new = sum(TEST_DATAFRAME.copy(), action) self.assertEqual(df_new.to_dict(orient='records'), [ dict( group_id=1, amount=1000, total_amount=2100, ), dict( group_id=2, amount=1050, total_amount=2200, ), dict( group_id=1, amount=1100, total_amount=2100, ), dict( group_id=2, amount=1150, total_amount=2200, ), ]) def __groupby_agg_action(self, output_col): return dict( action_arguments=['amount'], action_options=dict( groupby_columns=['group_id'] ), outputs=[ dict(uuid=output_col), ], ) mage_ai/tests/data_cleaner/transformer_actions/test_base.py METASEP from data_cleaner.transformer_actions.base import BaseAction from data_cleaner.shared.hash import merge_dict from tests.base_test import TestCase from tests.data_cleaner.transformer_actions.shared import TEST_ACTION import numpy as np import pandas as pd def build_df(): return pd.DataFrame([ [2, False, 5.0], ['$3', False, '$6.0', 1], ['$4,000', None, '$7,000', 200], ['$3', False, '$4.0', 3], ['$4,000', None, 3.0, 4], [5, True, 8000, 5], ], columns=['deposited', 'fund', 'amount', 'index']).set_index('index') class BaseActionTests(TestCase): # def test_execute(self): # df = build_df() # base_action = BaseAction(merge_dict( # TEST_ACTION, # dict(action_code='%{1_1} >= 3 and (%{1_2} == false or %{1_2} != %{1_2}) and %{1_4} >= 5.0'), # )) # self.assertEqual(base_action.execute(df).sort_values('deposited').to_numpy().tolist(), [ # ['$3', False, '$6.0'], # ['$4,000', None, '$7,000'], # ]) def test_execute_axis_column(self): df = build_df() base_action = BaseAction(merge_dict( TEST_ACTION, dict( action_arguments=[ '%{1_1}', # '%{3_1}', ], action_type='remove', axis='column', ), )) df_new = base_action.execute(df) self.assertEqual(df_new.values.tolist(), [ [False, 5.0], [False, '$6.0'], [None, '$7,000'], [False, '$4.0'], [None, 3.0], [True, 8000], ]) def test_execute_with_no_columns_to_transform(self): df = build_df() base_action = BaseAction(merge_dict( TEST_ACTION, dict( action_arguments=[ '%{1_1}', ], action_type='remove', axis='column', ), )) raised = False try: base_action.execute(df.drop(columns=['deposited'])) except Exception: raised = True self.assertFalse(raised) def test_groupby(self): df = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ], columns=[ 'store', 'date', 'sold', ]) base_action = BaseAction(dict( action_type='group', action_arguments=['store'], 
action_code='', action_variables=dict(), child_actions=[ dict( action_type='sort', axis='row', action_arguments=['date'], action_code='', action_variables=dict(), ), dict( action_type='diff', action_arguments=['sold'], action_code='', action_variables=dict(), axis='column', outputs=[dict(uuid='sold_diff')] ), dict( action_type='shift_down', action_arguments=['sold'], action_code='', action_variables=dict(), axis='column', outputs=[dict(uuid='prev_sold')] ), ], )) df_new = base_action.execute(df) df_new = df_new.fillna(0) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-01', 1000, 0, 0], ['a', '2020-01-02', 1100, 100, 1000], ['a', '2020-01-03', 1050, -50, 1100], ['b', '2020-01-03', 1200, 0, 0], ['b', '2020-01-04', 990, -210, 1200], ]) def test_hydrate_action(self): base_action = BaseAction(TEST_ACTION) base_action.hydrate_action() hydrated_action = TEST_ACTION.copy() hydrated_action['action_code'] = \ 'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")' hydrated_action['action_arguments'] = [ 'omni.deposited', 'magic.spell', ] hydrated_action['action_options'] = dict( condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000', default=0, timestamp_feature_a='omni.fund', timestamp_feature_b='omni.delivered_at', window=2592000, ) self.assertEqual(base_action.action, hydrated_action) def test_hydrate_action_when_adding_column(self): base_action = BaseAction(merge_dict(TEST_ACTION, dict( action_type='add', axis='column', ))) base_action.hydrate_action() hydrated_action = TEST_ACTION.copy() hydrated_action['action_code'] = \ 'omni.deposited == True and (omni.fund == "The Quant" or omni.fund == "Yield")' hydrated_action['action_type'] = 'add' hydrated_action['axis'] = 'column' hydrated_action['action_arguments'] = [ 'omni.deposited', 'magic.spell', ] hydrated_action['action_options'] = dict( condition='omni.delivered_at >= magic.booked_at and magic.booked_at >= omni.delivered_at - 2592000', default=0, timestamp_feature_a='omni.fund', timestamp_feature_b='omni.delivered_at', window=2592000, ) self.assertEqual(base_action.action, hydrated_action) def test_join(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A'], ['b', 'Store B'], ], columns=[ 'store_name', 'description', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'Store A'], ['a', '2020-01-01', 1000, 'Store A'], ['b', '2020-01-04', 990, 'Store B'], ['a', '2020-01-02', 1100, 'Store A'], ['b', '2020-01-03', 1200, 'Store B'], ['c', '2020-01-07', 1250, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', ]) def test_join_rename_column(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A', '2020-02-01'], ['b', 'Store B', '2020-02-02'], ], columns=[ 'store_name', 
'description', 'date', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), outputs=[ { 'source_feature': { 'uuid': 'store_name', }, 'uuid': 'store_name', }, { 'source_feature': { 'uuid': 'description', }, 'uuid': 'description', }, { 'source_feature': { 'uuid': 'date', }, 'uuid': 'date_1', } ] )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'Store A', '2020-02-01'], ['a', '2020-01-01', 1000, 'Store A', '2020-02-01'], ['b', '2020-01-04', 990, 'Store B', '2020-02-02'], ['a', '2020-01-02', 1100, 'Store A', '2020-02-01'], ['b', '2020-01-03', 1200, 'Store B', '2020-02-02'], ['c', '2020-01-07', 1250, np.NaN, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', 'date_1', ]) def test_join_rename_join_key(self): df1 = pd.DataFrame([ ['a', '2020-01-03', 1050], ['a', '2020-01-01', 1000], ['b', '2020-01-04', 990], ['a', '2020-01-02', 1100], ['b', '2020-01-03', 1200], ['c', '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['a', 'Store A', '2020-02-01'], ['b', 'Store B', '2020-02-02'], ], columns=[ 'store', 'description', 'date', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store'], ), action_variables=dict(), outputs=[ { 'source_feature': { 'uuid': 'store', }, 'uuid': 'store_1', }, { 'source_feature': { 'uuid': 'description', }, 'uuid': 'description', }, { 'source_feature': { 'uuid': 'date', }, 'uuid': 'date_1', } ] )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['a', '2020-01-03', 1050, 'a', 'Store A', '2020-02-01'], ['a', '2020-01-01', 1000, 'a', 'Store A', '2020-02-01'], ['b', '2020-01-04', 990, 'b', 'Store B', '2020-02-02'], ['a', '2020-01-02', 1100, 'a', 'Store A', '2020-02-01'], ['b', '2020-01-03', 1200, 'b', 'Store B', '2020-02-02'], ['c', '2020-01-07', 1250, np.NaN, np.NaN, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_1', 'description', 'date_1', ]) def test_join_cast_to_str(self): df1 = pd.DataFrame([ [1, '2020-01-03', 1050], [1, '2020-01-01', 1000], [2, '2020-01-04', 990], [1, '2020-01-02', 1100], [2, '2020-01-03', 1200], [3, '2020-01-07', 1250], ], columns=[ 'store', 'date', 'sold', ]) df2 = pd.DataFrame([ ['1', 'Store A'], ['2', 'Store B'], ], columns=[ 'store_name', 'description', ]) base_action = BaseAction(dict( action_type='join', action_arguments=[100], action_code='', action_options=dict( left_on=['store'], right_on=['store_name'], drop_columns=['store_name'], rename_columns={'description': 'store_description'} ), action_variables=dict(), )) df_new = base_action.execute(df1, df_to_join=df2) self.assertEqual(df_new.values.tolist(), [ ['1', '2020-01-03', 1050, 'Store A'], ['1', '2020-01-01', 1000, 'Store A'], ['2', '2020-01-04', 990, 'Store B'], ['1', '2020-01-02', 1100, 'Store A'], ['2', '2020-01-03', 1200, 'Store B'], ['3', '2020-01-07', 1250, np.NaN], ]) self.assertEqual(df_new.columns.to_list(), [ 'store', 'date', 'sold', 'store_description', ]) mage_ai/tests/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" 
or %{1}.%{1_2} == \"Yield\")',
    action_arguments=[
        '%{1}.%{1_1}',
        '%{2}.%{2_1}',
    ],
    action_options=dict(
        condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000',
        default=0,
        timestamp_feature_a='%{1}.%{1_2}',
        timestamp_feature_b='%{1}.%{1_3}',
        window=2592000,
    ),
    action_variables={
        '1': dict(type='feature_set_version', feature_set_version=dict(
            feature_set=dict(column_type='category', uuid='omni'),
        )),
        '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')),
        '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')),
        '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')),
        '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')),
        '2': dict(type='feature_set_version', feature_set_version=dict(
            feature_set=dict(column_type='category', uuid='magic'),
        )),
        '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')),
        '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')),
        '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')),
    },
)
mage_ai/tests/data_cleaner/transformer_actions/__init__.py METASEP
mage_ai/tests/data_cleaner/cleaning_rules/test_remove_duplicate_rows.py METASEP
from data_cleaner.cleaning_rules.remove_duplicate_rows \
    import RemoveDuplicateRows
from tests.base_test import TestCase
import numpy as np
import pandas as pd


class RemoveDuplicateRowsTests(TestCase):
    def test_evaluate(self):
        df = pd.DataFrame([
            [1, '2022-01-01'],
            [2, '2022-01-02'],
            [3, '2022-01-03'],
            [2, '2022-01-02'],
            [4, '2022-01-04'],
            [5, '2022-01-05'],
            [3, '2022-01-03']
        ], columns=['id', 'deleted_at'])
        column_types = {
            'id': 'number',
            'deleted_at': 'datetime',
        }
        result = RemoveDuplicateRows(
            df,
            column_types,
            {},
        ).evaluate()
        self.assertEqual(result, [
            dict(
                title='Remove duplicate rows',
                message='There\'re 2 duplicate rows in the dataset.'
                        ' Suggest to remove them.',
                action_payload=dict(
                    action_type='drop_duplicate',
                    action_arguments=[],
                    action_code='',
                    action_options={},
                    action_variables={},
                    axis='row',
                    outputs=[],
                ),
            )
        ])
mage_ai/tests/data_cleaner/cleaning_rules/test_remove_columns_with_single_value.py METASEP
from data_cleaner.cleaning_rules.remove_columns_with_single_value \
    import RemoveColumnsWithSingleValue
from tests.base_test import TestCase
import pandas as pd
import numpy as np


class RemoveColumnWithSingleValueTests(TestCase):
    def test_evaluate(self):
        df = pd.DataFrame([
            [1, '2022-01-01', True],
            [2, '2022-01-02', True],
            [3, np.NaN, True],
            [4, np.NaN, True],
            [5, np.NaN, True],
        ], columns=['id', 'deleted_at', 'is_active'])
        column_types = {
            'id': 'number',
            'deleted_at': 'datetime',
            'is_active': 'true_or_false',
        }
        statistics = {
            'id/count_distinct': 5,
            'deleted_at/count_distinct': 2,
            'is_active/count_distinct': 1,
        }
        result = RemoveColumnsWithSingleValue(df, column_types, statistics).evaluate()
        self.assertEqual(result, [
            dict(
                title='Remove columns with single value',
                message='The following columns have single value in all rows: [\'is_active\'].'
' Suggest to remove them.', action_payload=dict( action_type='remove', action_arguments=['is_active'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/test_remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from tests.base_test import TestCase import numpy as np import pandas as pd class RemoveColumnWithHighEmptyRateTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ [1, '2022-01-01'], [2, np.NaN], [3, np.NaN], [4, np.NaN], [5, np.NaN], ], columns=['id', 'deleted_at']) column_types = { 'id': 'number', 'deleted_at': 'datetime', } statistics = { 'id/null_value_rate': 0, 'deleted_at/null_value_rate': 0.8, } result = RemoveColumnsWithHighEmptyRate( df, column_types, statistics, ).evaluate() self.assertEqual(result, [ dict( title='Remove columns with high empty rate', message='The following columns have high empty rate: [\'deleted_at\'].' ' Removing them may increase your data quality.', action_payload=dict( action_type='remove', action_arguments=['deleted_at'], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/test_remove_collinear_columns.py METASEP from data_cleaner.cleaning_rules.remove_collinear_columns import RemoveCollinearColumns from tests.base_test import TestCase from pandas.util.testing import assert_frame_equal import numpy as np import pandas as pd class RemoveCollinearColumnsTests(TestCase): def setUp(self): self.rng = np.random.default_rng(42) return super().setUp() def test_categorical_data_frame(self): df = pd.DataFrame([ [1, 1000, '2021-10-01', '2021-09-01'], [1, 1050, '2021-10-01', '2021-08-01'], [1, 1100, '2021-10-01', '2021-01-01'], [2, 1150, '2021-09-01', '2021-08-01'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) column_types = { 'group_id': 'category', 'order_id': 'category', 'group_churned_at': 'datetime', 'order_created_at': 'datetime' } statistics = {} result = RemoveCollinearColumns(df, column_types, statistics).evaluate() self.assertEqual(result, []) def test_clean_removes_all_data_frame(self): df = pd.DataFrame([ [None, 1000, '2021-10-01', '2021-09-01'], [1, None, '2021-10-01', '2021-08-01'], [np.nan, 1100, '2021-10-01', '2021-01-01'], [None, 1150, '2021-09-01', '2021-08-01'], ], columns=[ 'group_id', 'order_id', 'group_churned_at', 'order_created_at', ]) column_types = { 'group_id': 'number', 'order_id': 'number', 'group_churned_at': 'datetime', 'order_created_at': 'datetime' } statistics = {} result = RemoveCollinearColumns(df, column_types, statistics).evaluate() self.assertEqual(result, []) def test_collinear_no_results(self): df = pd.DataFrame([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1] ], columns=['number_of_users', 'views', 'revenue', 'losses']) column_types = { 'number_of_users': 'number', 'views': 'number', 'revenue': 'number', 'losses': 'number', } statistics = {} result = RemoveCollinearColumns(df, column_types, statistics).evaluate() self.assertEqual(result, []) def test_evaluate(self): df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [700, 11750, 20, 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 
'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]) column_types = { 'number_of_users': 'number', 'views': 'number', 'number_of_creators': 'number', 'losses': 'number', 'number_of_advertisers': 'number' } statistics = {} rule = RemoveCollinearColumns(df, column_types, statistics) results = rule.evaluate() expected_results = [ dict( title='Remove collinear columns', message='The following columns are strongly correlated ' 'with other columns in the dataset: [\'number_of_users\']. ' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', action_payload=dict( action_type='remove', action_arguments=['number_of_users'], axis='column', action_options = {}, action_variables = {}, action_code = '', outputs = [], ) ) ] self.assertEqual(results, expected_results) def test_evaluate_bad_dtypes(self): df = pd.DataFrame([ [1000, 'US', 30000, '10', 'cute animal #1', 100, '30'], ['500', 'CA', 10000, '20', 'intro to regression', 3000, '20'], [200, '', np.nan, 50, 'daily news #1', None, '75'], [250, 'CA', 7500, 25, 'machine learning seminar', 8000, '20'], ['1000', 'MX', 45003, '20', 'cute animal #4', 90, '40'], [1500, 'MX', 75000, '30', '', 70, 25], [1500, 'US', 75000, np.nan, 'daily news #3', 70, '25'], [None, 'US', 75000, '30', 'tutorial: how to start a startup', 70, np.nan], [1250, 'US', 60000, '50', 'cute animal #3', 80, '20'], ['200', 'CA', 5000, '30', '', 10000, '30'], [800, 'US', 12050, '40', 'meme compilation', 2000, '45'], ['600', 'CA', 11000, '50', 'daily news #2', 3000, '50'], [600, 'CA', '', 50, '', 3000, None], ['700', 'MX', 11750, '20', 'cute animal #2', 2750, '55'], [700, '', None, 20, '', None, '55'], [700, 'MX', 11750, '', '', 2750, '55'], [1200, 'MX', 52000, '10', 'vc funding strats', 75, '60'] ], columns=[ 'number_of_users', 'location', 'views', 'number_of_creators', 'name', 'losses', 'number_of_advertisers' ]) cleaned_df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [700, 11750, 20, 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]).astype(float) column_types = { 'number_of_users': 'number', 'location': 'category', 'views': 'number', 'number_of_creators': 'number', 'name': 'text', 'losses': 'number', 'number_of_advertisers': 'number' } statistics = {} rule = RemoveCollinearColumns(df, column_types, statistics) assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True)) results = rule.evaluate() expected_results = [ dict( title='Remove collinear columns', message='The following columns are strongly correlated ' 'with other columns in the dataset: [\'number_of_users\']. 
' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', action_payload=dict( action_type='remove', action_arguments=['number_of_users'], axis='column', action_options = {}, action_variables = {}, action_code = '', outputs = [], ) ) ] self.assertEqual(results, expected_results) def test_evaluate_dirty(self): df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [200, np.nan, 50, None, 75], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1500, 75000, np.nan, 70, 25], [None, 75000, 30, 70, np.nan], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [600, '', 50, 3000, None], [700, 11750, 20, 2750, 55], [700, None, 20, None, 55], [700, 11750, '', 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]) cleaned_df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [700, 11750, 20, 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]).astype(float) column_types = { 'number_of_users': 'number', 'views': 'number', 'number_of_creators': 'number', 'losses': 'number', 'number_of_advertisers': 'number' } statistics = {} rule = RemoveCollinearColumns(df, column_types, statistics) assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True)) results = rule.evaluate() expected_results = [ dict( title='Remove collinear columns', message='The following columns are strongly correlated ' 'with other columns in the dataset: [\'number_of_users\']. 
' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', action_payload=dict( action_type='remove', action_arguments=['number_of_users'], axis='column', action_options = {}, action_variables = {}, action_code = '', outputs = [], ) ) ] self.assertEqual(results, expected_results) def test_evaluate_non_numeric(self): df = pd.DataFrame([ [1000, 'US', 30000, 10, 'cute animal #1', 100, 30], [500, 'CA', 10000, 20, 'intro to regression', 3000, 20], [200, '', np.nan, 50, 'daily news #1', None, 75], [250, 'CA', 7500, 25, 'machine learning seminar', 8000, 20], [1000, 'MX', 45003, 20, 'cute animal #4', 90, 40], [1500, 'MX', 75000, 30, '', 70, 25], [1500, 'US', 75000, np.nan, 'daily news #3', 70, 25], [None, 'US', 75000, 30, 'tutorial: how to start a startup', 70, np.nan], [1250, 'US', 60000, 50, 'cute animal #3', 80, 20], [200, 'CA', 5000, 30, '', 10000, 30], [800, 'US', 12050, 40, 'meme compilation', 2000, 45], [600, 'CA', 11000, 50, 'daily news #2', 3000, 50], [600, 'CA', '', 50, '', 3000, None], [700, 'MX', 11750, 20, 'cute animal #2', 2750, 55], [700, '', None, 20, '', None, 55], [700, 'MX', 11750, '', '', 2750, 55], [1200, 'MX', 52000, 10, 'vc funding strats', 75, 60] ], columns=[ 'number_of_users', 'location', 'views', 'number_of_creators', 'name', 'losses', 'number_of_advertisers' ]) cleaned_df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [700, 11750, 20, 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]).astype(float) column_types = { 'number_of_users': 'number', 'location': 'category', 'views': 'number', 'number_of_creators': 'number', 'name': 'text', 'losses': 'number', 'number_of_advertisers': 'number' } statistics = {} rule = RemoveCollinearColumns(df, column_types, statistics) assert_frame_equal(cleaned_df, rule.numeric_df.reset_index(drop=True)) results = rule.evaluate() expected_results = [ dict( title='Remove collinear columns', message='The following columns are strongly correlated ' 'with other columns in the dataset: [\'number_of_users\']. ' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', action_payload=dict( action_type='remove', action_arguments=['number_of_users'], axis='column', action_options = {}, action_variables = {}, action_code = '', outputs = [], ) ) ] self.assertEqual(results, expected_results) def test_perfectly_collinear(self): number_of_users = self.rng.integers(1000, 500000, (10000)) views = number_of_users * 300 revenue = 2 * views - number_of_users losses = revenue / views + number_of_users df = pd.DataFrame({ 'number_of_users':number_of_users, 'views': views, 'revenue': revenue, 'losses': losses }) column_types = { 'number_of_users': 'number', 'views': 'number', 'revenue': 'number', 'losses': 'number', } statistics = {} result = RemoveCollinearColumns(df, column_types, statistics).evaluate() expected_results = [ dict( title='Remove collinear columns', message='The following columns are strongly correlated ' 'with other columns in the dataset:' ' [\'number_of_users\', \'views\', \'revenue\']. 
' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', action_payload=dict( action_type='remove', action_arguments=['number_of_users', 'views', 'revenue'], axis='column', action_options = {}, action_variables = {}, action_code = '', outputs = [], ) ) ] self.assertEqual(result, expected_results) def test_vif_calcuation(self): df = pd.DataFrame([ [1000, 30000, 10, 100, 30], [500, 10000, 20, 3000, 20], [250, 7500, 25, 8000, 20], [1000, 45003, 20, 90, 40], [1500, 75000, 30, 70, 25], [1250, 60000, 50, 80, 20], [200, 5000, 30, 10000, 30], [800, 12050, 40, 2000, 45], [600, 11000, 50, 3000, 50], [700, 11750, 20, 2750, 55], [1200, 52000, 10, 75, 60] ], columns=[ 'number_of_users', 'views', 'number_of_creators', 'losses', 'number_of_advertisers' ]) column_types = { 'number_of_users': 'number', 'views': 'number', 'number_of_creators': 'number', 'losses': 'number', 'number_of_advertisers': 'number' } statistics = {} rule = RemoveCollinearColumns(df, column_types, statistics) expected_vifs_no_remove = ( 59.32817701051733, 26.10502642724925, 5.6541251174451315, 2.6033835916281176, 10.735934980453335 ) expected_vifs_remove = ( 59.32817701051733, 2.941751614824833, 3.4216357503903243, 1.370441833599666 ) for column, expected_vif in zip(rule.numeric_columns, expected_vifs_no_remove): vif = rule.get_variance_inflation_factor(column) self.assertAlmostEqual(vif, expected_vif) for column, expected_vif in zip(rule.numeric_columns[:-1], expected_vifs_remove): vif = rule.get_variance_inflation_factor(column) self.assertAlmostEqual(vif, expected_vif) rule.numeric_df.drop(column, axis=1, inplace=True) mage_ai/tests/data_cleaner/cleaning_rules/test_clean_column_names.py METASEP from data_cleaner.cleaning_rules.clean_column_names import CleanColumnNames from data_cleaner.transformer_actions.constants import ActionType from tests.base_test import TestCase import pandas as pd class CleanColumnNameTests(TestCase): def test_evaluate(self): df = pd.DataFrame([ ['', '', '', '', '', '', '' , '', ''], ], columns=[ 'good_name', 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 23', 'true', 'true_crime', '@#f$%&*o$*(%^&r*$%&' ] ) result = CleanColumnNames( df, {}, {}, ).evaluate() self.assertEqual(result, [ dict( title='Clean dirty column names', message='The following columns have unclean naming conventions: ' '[\'Bad Case\', \'%@#342%34@@#342\', \'yield\',' ' \'12342\', \'1234. 23\', \'true\', \'@#f$%&*o$*(%^&r*$%&\']' '. Making these names lowercase and alphanumeric may improve' 'ease of dataset access and reduce security risks.', action_payload=dict( action_type=ActionType.CLEAN_COLUMN_NAME, action_arguments=[ 'Bad Case', '%@#342%34@@#342', 'yield', '12342', '1234. 
23', 'true', '@#f$%&*o$*(%^&r*$%&' ], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ), ) ]) mage_ai/tests/data_cleaner/cleaning_rules/__init__.py METASEP mage_ai/data_cleaner/transformer_actions/udf/substring.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Substring(BaseUDF): def execute(self): start = self.options.get('start') stop = self.options.get('stop') if start is None and stop is None: raise Exception('Require at least one of `start` and `stop` parameters.') return self.df[self.arguments[0]].str.slice(start=start, stop=stop) mage_ai/data_cleaner/transformer_actions/udf/string_split.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringSplit(BaseUDF): def execute(self): separator = self.options.get('separator') part_index = self.options.get('part_index') if separator is None or part_index is None: raise Exception('Require both `separator` and `part_index` parameters.') return self.df[self.arguments[0]].str.split(separator).str[part_index].str.strip() mage_ai/data_cleaner/transformer_actions/udf/string_replace.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class StringReplace(BaseUDF): def execute(self): pattern = self.options.get('pattern') replacement = self.options.get('replacement') if not pattern and not replacement: raise Exception(f'Require both `pattern` and `replacement` parameters.') return self.df[self.arguments[0]].str.replace(pattern, replacement) mage_ai/data_cleaner/transformer_actions/udf/multiply.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Multiply(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) * self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] * float(self.options['value']) raise Exception('Require second column or a value to multiply.') mage_ai/data_cleaner/transformer_actions/udf/if_else.py METASEP from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.udf.base import BaseUDF class IfElse(BaseUDF): def execute(self): df_copy = self.df.copy() true_index = query_with_action_code(df_copy, self.code, self.kwargs).index arg1_type = self.options.get('arg1_type', 'value') arg2_type = self.options.get('arg2_type', 'value') arg1 = self.arguments[0] if arg1_type == 'column': arg1 = df_copy[arg1] arg2 = self.arguments[1] if arg2_type == 'column': arg2 = df_copy[arg2] df_copy.loc[true_index, 'result'] = arg1 df_copy['result'] = df_copy['result'].fillna(arg2) return df_copy['result'] mage_ai/data_cleaner/transformer_actions/udf/formatted_date.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class FormattedDate(BaseUDF): def execute(self): return pd.to_datetime( self.df[self.arguments[0]], ).dt.strftime(self.options['format']) mage_ai/data_cleaner/transformer_actions/udf/divide.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Divide(BaseUDF): def execute(self): col1 = self.arguments[0] if len(self.arguments) > 1: col2 = self.arguments[1] return self.df[col1].astype(float) / self.df[col2].astype(float) elif self.options.get('value') is not None: return self.df[col1] / float(self.options['value']) raise Exception('Require second column or a value to divide.') mage_ai/data_cleaner/transformer_actions/udf/distance_between.py METASEP from 
data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np EARTH_RADIUS = 6371 class DistanceBetween(BaseUDF): def execute(self): def __haversine(lat1, lng1, lat2, lng2): lat1, lng1, lat2, lng2 = np.radians([lat1, lng1, lat2, lng2]) a = np.sin((lat2-lat1)/2.0)**2 + \ np.cos(lat1) * np.cos(lat2) * np.sin((lng2-lng1)/2.0)**2 return EARTH_RADIUS * 2 * np.arcsin(np.sqrt(a)) return __haversine( self.df[self.arguments[0]], self.df[self.arguments[1]], self.df[self.arguments[2]], self.df[self.arguments[3]], ) mage_ai/data_cleaner/transformer_actions/udf/difference.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class Difference(BaseUDF): def execute(self): col1 = self.arguments[0] column_type = self.options.get('column_type', self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) > 1: col2 = self.arguments[1] return self.__difference_between_columns( self.df[col1], self.df[col2], column_type=column_type, options=self.options, ) elif self.options.get('value') is not None: return self.__subtract_value( self.df[col1], self.options['value'], column_type=column_type, options=self.options, ) raise Exception('Require second column or a value to minus.') def __difference_between_columns(self, column1, column2, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return (pd.to_datetime(column1, utc=True) - pd.to_datetime(column2, utc=True)).dt.days return column1 - column2 def __subtract_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) - pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column - value mage_ai/data_cleaner/transformer_actions/udf/date_trunc.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF import numpy as np import pandas as pd class DateTrunc(BaseUDF): def execute(self): date_part = self.options['date_part'] date_column = self.arguments[0] df_copy = self.df.copy() df_copy[date_column] = pd.to_datetime(df_copy[date_column]) if date_part == 'week': return (df_copy[date_column] - df_copy[date_column].dt.weekday * np.timedelta64(1, 'D')).\ dt.strftime('%Y-%m-%d') raise Exception(f'Date part {date_part} is not supported.') mage_ai/data_cleaner/transformer_actions/udf/constant.py METASEP from data_cleaner.transformer_actions.udf.base import BaseUDF class Constant(BaseUDF): def execute(self): return self.arguments[0] mage_ai/data_cleaner/transformer_actions/udf/base.py METASEP import importlib class BaseUDF(): def __init__(self, df, arguments=[], code=None, options={}, kwargs={}): self.df = df self.arguments = arguments self.code = code self.options = options self.kwargs = kwargs def execute(self): pass def execute_udf(udf_name, df, arguments, code, options, kwargs): udf_class = getattr( importlib.import_module(f'data_cleaner.transformer_actions.udf.{udf_name}'), udf_name.title().replace('_', ''), ) return udf_class(df, arguments, code, options, kwargs).execute() mage_ai/data_cleaner/transformer_actions/udf/addition.py METASEP from data_cleaner.column_type_detector import DATETIME from data_cleaner.transformer_actions.udf.base import BaseUDF import pandas as pd class Addition(BaseUDF): def execute(self): col1 = self.arguments[0] df_result = self.df[col1] column_type = self.options.get("column_type", 
self.kwargs.get('column_types', {}).get(col1)) if len(self.arguments) == 1 and 'value' not in self.options: raise Exception('Require second column or a value to add.') if len(self.arguments) > 1: for col in self.arguments[1:]: df_result = df_result + self.df[col] if self.options.get('value') is not None: df_result = self.__add_value( df_result, self.options['value'], column_type=column_type, options=self.options, ) return df_result def __add_value(self, original_column, value, column_type=None, options={}): if column_type == DATETIME: time_unit = options.get('time_unit', 'd') return ( pd.to_datetime(original_column, utc=True) + pd.to_timedelta(value, unit=time_unit) ).dt.strftime('%Y-%m-%d %H:%M:%S') return original_column + value mage_ai/data_cleaner/transformer_actions/udf/__init__.py METASEP mage_ai/tests/data_cleaner/test_column_type_detector.py METASEP from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, EMAIL, MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES, NUMBER, NUMBER_WITH_DECIMALS, PHONE_NUMBER, TEXT, TRUE_OR_FALSE, ZIP_CODE, get_mismatched_row_count, infer_column_types, ) from tests.base_test import TestCase from faker import Faker import pandas as pd fake = Faker() class ColumnTypeDetectorTests(TestCase): def test_get_mismatched_row_count(self): df = pd.DataFrame([ [1, '[email protected]', '32132'], [2, '[email protected]', '12345'], [3, 'test', '1234'], [4, '[email protected]', 'abcde'], [5, 'abc12345@', '54321'], [6, '[email protected]', '56789'], ], columns=['id', 'email', 'zip_code']) count1 = get_mismatched_row_count(df['id'], 'number') count2 = get_mismatched_row_count(df['email'], 'email') count3 = get_mismatched_row_count(df['zip_code'], 'zip_code') self.assertEqual(count1, 0) self.assertEqual(count2, 2) self.assertEqual(count3, 1) def test_infer_column_types(self): columns = [ 'true_or_false', 'number_with_decimals', 'category', 'datetime', 'text', 'number', 'number_with_dollars', 'number_with_percentage', 'zip_code', 'zip_code_with_3_numbers', 'invalid_zip_code', 'email', 'phone_number', 'datetime_abnormal', 'name', ] table = [ [ '1', 3, 'male', '2020-1-1', '1.0', 1, 3, '30%', '10128-1213', 123, 123, '[email protected]', '123-456-7890', 'May 4, 2021, 6:35 PM', fake.name(), ], [ '1', 12.0, 'female', '2020-07-13', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 2, '$4', '12.32%', 12345, 1234, 1234, '[email protected]', '(123) 456-7890', 'Feb 17, 2021, 2:57 PM', fake.name(), ], [ '1', 0, 'machine', '2020-06-25 01:02', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 3, '$5,000', '50%', '12345', 12345, 12345, '[email protected]', '1234567890', 'Feb 18, 2021, 2:57 PM', fake.name(), ], [ 0, '40.7', 'mutant', '2020-12-25 01:02:03', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 4, '$5,000.01', '20%', '12345', 12345, 123456, '[email protected]', '1234567', 'Feb 19, 2021, 2:57 PM', fake.name(), ], [ 0, '40.7', 'alien', '2020-12-25T01:02:03.000Z', ' '.join(['t' for i in range(MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES + 1)]), 4, '-$10128,121.3123', '18%', '01234', 12345, 12, '[email protected]', '(123)456-7890', 'Feb 20, 2021, 2:57 PM', fake.name(), ], ] date_formats = [ '01/1/2019', '1/1/2019', '1/21/2019', '11/1/2019', '2020/01/1', '2020/1/01', '2020/1/1', 'Pending', ] for date_format in date_formats: table.append([ 0, '40.7', 'mutant', date_format, fake.text(), 4, '$5,000.01', '15.32%', '01234', 12345, 12, '[email protected]', '(123)456-7890', 
'Feb 18, 2021, 2:57 PM', fake.name(), ]) df = pd.DataFrame(table, columns=columns) column_types = infer_column_types(df) self.assertEqual( column_types, { 'true_or_false': TRUE_OR_FALSE, 'number_with_decimals': NUMBER_WITH_DECIMALS, 'category': CATEGORY, 'datetime': DATETIME, 'text': TEXT, 'number': NUMBER, 'number_with_dollars': NUMBER_WITH_DECIMALS, 'number_with_percentage': NUMBER_WITH_DECIMALS, 'zip_code': ZIP_CODE, 'zip_code_with_3_numbers': ZIP_CODE, 'invalid_zip_code': NUMBER, 'email': EMAIL, 'phone_number': PHONE_NUMBER, 'datetime_abnormal': DATETIME, 'name': TEXT, }, ) mage_ai/tests/data_cleaner/__init__.py METASEP mage_ai/data_cleaner/transformer_actions/variable_replacer.py METASEP from data_cleaner.transformer_actions.constants import VariableType import re def interpolate(text, key, variable_data): """ text: string to operate on key: key to search within text variable_data: dictionary containing data used to interpolate """ regex_replacement = key if variable_data['type'] == VariableType.FEATURE: regex_replacement = variable_data[VariableType.FEATURE]['uuid'] elif variable_data['type'] == VariableType.FEATURE_SET_VERSION: regex_replacement = \ variable_data[VariableType.FEATURE_SET_VERSION][VariableType.FEATURE_SET]['uuid'] regex_pattern = re.compile( '\%__BRACKETS_START__{}__BRACKETS_END__' .format(key) .replace('__BRACKETS_START__', '\{') .replace('__BRACKETS_END__', '\}') ) return re.sub(regex_pattern, regex_replacement, str(text)) def replace_true_false(action_code): regex_pattern_true = re.compile(' true') regex_pattern_false = re.compile(' false') return re.sub( regex_pattern_true, ' True', re.sub(regex_pattern_false, ' False', action_code), ) mage_ai/data_cleaner/transformer_actions/utils.py METASEP from data_cleaner.transformer_actions.constants import ActionType, Axis def columns_to_remove(transformer_actions): arr = filter( lambda x: x['action_type'] == ActionType.REMOVE and x['axis'] == Axis.COLUMN, transformer_actions, ) columns = [] for transformer_action in arr: columns += transformer_action['action_arguments'] return columns mage_ai/data_cleaner/transformer_actions/shared.py METASEP TEST_ACTION = dict( action_type='filter', axis='row', action_code='%{1}.%{1_1} == True and (%{1}.%{1_2} == \"The Quant\" or %{1}.%{1_2} == \"Yield\")', action_arguments=[ '%{1}.%{1_1}', '%{2}.%{2_1}', ], action_options=dict( condition='%{1}.%{1_3} >= %{2}.%{2_2} and %{2}.%{2_2} >= %{1}.%{1_3} - 2592000', default=0, timestamp_feature_a='%{1}.%{1_2}', timestamp_feature_b='%{1}.%{1_3}', window=2592000, ), action_variables={ '1': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='omni'), )), '1_1': dict(type='feature', feature=dict(column_type='number', uuid='deposited')), '1_2': dict(type='feature', feature=dict(column_type='category', uuid='fund')), '1_3': dict(type='feature', feature=dict(column_type='category', uuid='delivered_at')), '1_4': dict(type='feature', feature=dict(column_type='number_with_decimals', uuid='amount')), '2': dict(type='feature_set_version', feature_set_version=dict( feature_set=dict(column_type='category', uuid='magic'), )), '2_1': dict(type='feature', feature=dict(column_type='category', uuid='spell')), '2_2': dict(type='feature', feature=dict(column_type='category', uuid='booked_at')), '3_1': dict(type='feature', feature=dict(column_type='number', uuid='age')), }, ) mage_ai/data_cleaner/transformer_actions/row.py METASEP from data_cleaner.column_type_detector import NUMBER_TYPES from 
data_cleaner.transformer_actions.constants import VariableType from data_cleaner.transformer_actions.action_code import query_with_action_code import pandas as pd def drop_duplicates(df, action, **kwargs): keep = action.get('action_options', {}).get('keep', 'last') action_args = dict(keep=keep) subset_cols = action.get('action_arguments') if subset_cols is not None and len(subset_cols) > 0: action_args['subset'] = subset_cols return df.drop_duplicates(**action_args) def filter_rows(df, action, **kwargs): """ df: Pandas DataFrame action: TransformerAction serialized into a dictionary """ action_code = action['action_code'] return query_with_action_code(df, action_code, kwargs) def sort_rows(df, action, **kwargs): ascending = action.get('action_options', {}).get('ascending', True) ascendings = action.get('action_options', {}).get('ascendings', []) if len(ascendings) > 0: ascending = ascendings[0] feature_by_uuid = {} if action.get('action_variables'): for _, val in action['action_variables'].items(): feature = val.get('feature') if feature: feature_by_uuid[feature['uuid']] = feature na_indexes = None as_types = {} for idx, uuid in enumerate(action['action_arguments']): feature = feature_by_uuid.get(uuid) if feature and feature['column_type'] in NUMBER_TYPES: as_types[uuid] = float if idx == 0: na_indexes = df[(df[uuid].isnull()) | (df[uuid].astype(str).str.len() == 0)].index bad_df = None if na_indexes is not None: bad_df = df.index.isin(na_indexes) index = (df[~bad_df] if bad_df is not None else df).astype(as_types).sort_values( by=action['action_arguments'], ascending=ascendings if len(ascendings) > 0 else ascending, ).index df_final = df.loc[index] if bad_df is not None: if ascending: return pd.concat([ df.iloc[bad_df], df_final, ]) return pd.concat([ df_final, df.iloc[bad_df], ]) return df_final mage_ai/data_cleaner/transformer_actions/helpers.py METASEP from data_cleaner.column_type_detector import NUMBER, NUMBER_WITH_DECIMALS, TEXT from data_cleaner.transformer_actions.constants import ActionType, Operator, VariableType import numpy as np import re DAY_SECONDS = 86400 HOUR_SECONDS = 3600 def convert_col_type(df_col, col_type): if col_type == NUMBER: return df_col.replace(r'^\s*$', 0, regex=True).fillna(0).astype(np.int64) elif col_type == NUMBER_WITH_DECIMALS: return df_col.dropna().astype(float) elif col_type == TEXT: return df_col.dropna().astype(str) return df_col def convert_value_type(feature_uuid, action, value): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: column_type = v['feature']['column_type'] break if column_type == NUMBER: value = int(value) elif column_type == NUMBER_WITH_DECIMALS: value = float(value) return value def drop_na(df): return df.replace(r'^\s*$', np.nan, regex=True).dropna() def extract_join_feature_set_version_id(payload): if payload['action_type'] != ActionType.JOIN: return None join_feature_set_version_id = payload['action_arguments'][0] if type(join_feature_set_version_id) == str and \ join_feature_set_version_id.startswith('%{'): join_feature_set_version_id = next( v['id'] for v in payload['action_variables'].values() if v['type'] == VariableType.FEATURE_SET_VERSION ) return join_feature_set_version_id def get_column_type(feature_uuid, action): action_variables = action.get('action_variables', {}) column_type = None for v in action_variables.values(): if v['type'] == 'feature' and v['feature']['uuid'] == feature_uuid: 
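# Descriptive comment (added, not in the original source): this branch looks up the
# column type recorded for the matching feature uuid in action_variables — each entry
# of type 'feature' carries a feature dict with 'uuid' and 'column_type' keys, and the
# first match wins.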
column_type = v['feature']['column_type'] break return column_type def get_time_window_str(window_in_seconds): if window_in_seconds is None: return None if window_in_seconds >= DAY_SECONDS: time_window = f'{int(window_in_seconds / DAY_SECONDS)}d' elif window_in_seconds >= HOUR_SECONDS: time_window = f'{int(window_in_seconds / HOUR_SECONDS)}h' else: time_window = f'{window_in_seconds}s' return time_window mage_ai/data_cleaner/transformer_actions/constants.py METASEP class ActionType(): ADD = 'add' AVERAGE = 'average' CLEAN_COLUMN_NAME = 'clean_column_name' COUNT = 'count' COUNT_DISTINCT = 'count_distinct' DIFF = 'diff' DROP_DUPLICATE = 'drop_duplicate' EXPAND_COLUMN = 'expand_column' EXPLODE = 'explode' FILTER = 'filter' FIRST = 'first' GROUP = 'group' IMPUTE = 'impute' JOIN = 'join' LAST = 'last' LIMIT = 'limit' MAX = 'max' MEDIAN = 'median' MIN = 'min' MODE = 'mode' REMOVE = 'remove' SCALE = 'scale' SELECT = 'select' SHIFT_DOWN = 'shift_down' SHIFT_UP = 'shift_up' SORT = 'sort' SUM = 'sum' UNION = 'union' UPDATE_TYPE = 'update_type' UPDATE_VALUE = 'update_value' class Axis(): COLUMN = 'column' ROW = 'row' class VariableType(): FEATURE = 'feature' FEATURE_SET = 'feature_set' FEATURE_SET_VERSION = 'feature_set_version' class Operator(): CONTAINS = 'contains' NOT_CONTAINS = 'not contains' EQUALS = '==' NOT_EQUALS = '!=' GREATER_THAN = '>' GREATER_THAN_OR_EQUALS = '>=' LESS_THAN = '<' LESS_THAN_OR_EQUALS = '<=' mage_ai/data_cleaner/transformer_actions/column.py METASEP from data_cleaner.column_type_detector import REGEX_NUMBER from data_cleaner.transformer_actions.action_code import query_with_action_code from data_cleaner.transformer_actions.helpers import ( convert_col_type, get_column_type, get_time_window_str, ) from data_cleaner.transformer_actions.udf.base import execute_udf from keyword import iskeyword import pandas as pd import numpy as np import re def add_column(df, action, **kwargs): col = action['outputs'][0]['uuid'] col_type = action['outputs'][0]['column_type'] udf = action['action_options'].get('udf') if udf is None: return df df_copy = df.copy() df_copy[col] = execute_udf( udf, df, action.get('action_arguments'), action.get('action_code'), action.get('action_options'), kwargs, ) df_copy[col] = convert_col_type(df_copy[col], col_type) return df_copy def average(df, action, **kwargs): return __agg(df, action, 'mean') def count(df, action, **kwargs): return __groupby_agg(df, action, 'count') def count_distinct(df, action, **kwargs): return __groupby_agg(df, action, 'nunique') def clean_column_name(df, action, **kwargs): columns = action['action_arguments'] mapping = {} for column in columns: orig_name = column if iskeyword(column): column = f'{column}_' column = column.lower() column = re.sub(r'[\s\t\-\.]', '_', column) column = re.sub(r'[^a-z0-9\_]', '', column) column = REGEX_NUMBER.sub(lambda number: f'number_{number.group(0)}', column) if column == 'true' or column == 'false': column = f'{column}_' if iskeyword(column): # check second time if a keyword appears after removing nonalphanum column = f'{column}_' mapping[orig_name] = column return df.rename(columns=mapping) def diff(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].diff() return df def first(df, action, **kwargs): return __agg(df, action, 'first') def impute(df, action, **kwargs): columns = action['action_arguments'] action_options = action['action_options'] strategy = action_options.get('strategy') value = action_options.get('value') 
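# Descriptive comment (added, not in the original source): summary of the imputation
# strategy dispatch that follows —
#   - 'average' / 'median': fill missing values with the column-wise mean / median
#   - 'column': fill missing values from the existing column named by `value`
#   - otherwise, if `value` is provided, fill with that constant; else raise an Exception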
empty_string_pattern = r'^\s*$' df[columns] = df[columns].replace(empty_string_pattern, np.nan, regex=True) if strategy == 'average': df[columns] = df[columns].fillna(df[columns].astype(float).mean(axis=0)) elif strategy == 'median': df[columns] = df[columns].fillna(df[columns].astype(float).median(axis=0)) elif strategy == 'column': replacement_df = pd.DataFrame({col: df[value] for col in columns}) df[columns] = df[columns].fillna(replacement_df) elif value is not None: df[columns] = df[columns].fillna(value) else: raise Exception('Require a valid strategy or value') for col in columns: col_type = get_column_type(col, action) df[col] = convert_col_type(df[col], col_type) return df def max(df, action, **kwargs): return __agg(df, action, 'max') def median(df, action, **kwargs): return __agg(df, action, 'median') def min(df, action, **kwargs): return __agg(df, action, 'min') def remove_column(df, action, **kwargs): cols = action['action_arguments'] original_columns = df.columns drop_columns = [col for col in cols if col in original_columns] return df.drop(columns=drop_columns) def last(df, action, **kwargs): return __agg(df, action, 'last') def select(df, action, **kwargs): return df[action['action_arguments']] def shift_down(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] action_options = action.get('action_options', {}) groupby_columns = action_options.get('groupby_columns') periods = action_options.get('periods', 1) if groupby_columns is not None: df[output_col] = df.groupby(groupby_columns)[action['action_arguments'][0]].shift(periods) else: df[output_col] = df[action['action_arguments'][0]].shift(periods) return df def shift_up(df, action, **kwargs): output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].shift(-1) return df def sum(df, action, **kwargs): return __agg(df, action, 'sum') def __agg(df, action, agg_method): if action['action_options'].get('groupby_columns'): return __groupby_agg(df, action, agg_method) else: output_col = action['outputs'][0]['uuid'] df[output_col] = df[action['action_arguments'][0]].agg(agg_method) return df def __column_mapping(action): return dict(zip(action['action_arguments'], [o['uuid'] for o in action['outputs']])) # Filter by timestamp_feature_a - window <= timestamp_feature_b <= timestamp_feature_a def __filter_df_with_time_window(df, action): action_options = action['action_options'] time_window_keys = ['timestamp_feature_a', 'timestamp_feature_b', 'window'] if all(k in action_options for k in time_window_keys): window_in_seconds = action_options['window'] df_time_diff = \ (pd.to_datetime(df[action_options['timestamp_feature_a']], utc=True) - \ pd.to_datetime(df[action_options['timestamp_feature_b']], utc=True)).dt.total_seconds() if window_in_seconds > 0: df_time_diff_filtered = \ df_time_diff[(df_time_diff <= window_in_seconds) & (df_time_diff >= 0)] else: df_time_diff_filtered = \ df_time_diff[(df_time_diff >= window_in_seconds) & (df_time_diff <= 0)] df_filtered = df.loc[df_time_diff_filtered.index] time_window = get_time_window_str(window_in_seconds) else: df_filtered = df time_window = None return df_filtered, time_window def __groupby_agg(df, action, agg_method): df_filtered, _ = __filter_df_with_time_window(df, action) action_code = action.get('action_code') if action_code is not None and action_code != '': df_filtered = query_with_action_code(df_filtered, action_code, { 'original_df': df_filtered, }) action_options = action['action_options'] df_agg = df_filtered.groupby( 
action_options['groupby_columns'], )[action['action_arguments']].agg(agg_method) return df.merge( df_agg.rename(columns=__column_mapping(action)), on=action_options['groupby_columns'], how='left', ) mage_ai/data_cleaner/transformer_actions/base.py METASEP from data_cleaner.transformer_actions import column, row from data_cleaner.transformer_actions.constants import ActionType, Axis, VariableType from data_cleaner.transformer_actions.helpers import drop_na from data_cleaner.transformer_actions.variable_replacer import ( interpolate, replace_true_false, ) # from pipelines.column_type_pipelines import COLUMN_TYPE_PIPELINE_MAPPING import json COLUMN_TYPE_PIPELINE_MAPPING = {} FUNCTION_MAPPING = { Axis.COLUMN: { ActionType.ADD: column.add_column, ActionType.AVERAGE: column.average, ActionType.CLEAN_COLUMN_NAME: column.clean_column_name, ActionType.COUNT: column.count, ActionType.COUNT_DISTINCT: column.count_distinct, ActionType.DIFF: column.diff, # ActionType.EXPAND_COLUMN: column.expand_column, ActionType.FIRST: column.first, ActionType.IMPUTE: column.impute, ActionType.LAST: column.last, ActionType.MAX: column.max, ActionType.MEDIAN: column.median, ActionType.MIN: column.min, ActionType.REMOVE: column.remove_column, ActionType.SELECT: column.select, ActionType.SHIFT_DOWN: column.shift_down, ActionType.SHIFT_UP: column.shift_up, ActionType.SUM: column.sum, }, Axis.ROW: { ActionType.DROP_DUPLICATE: row.drop_duplicates, # ActionType.EXPLODE: row.explode, ActionType.FILTER: row.filter_rows, ActionType.SORT: row.sort_rows, }, } class BaseAction(): def __init__(self, action): self.action = action self.columns_by_type = {} for variable_data in self.action.get('action_variables', {}).values(): if not variable_data: continue feature = variable_data.get(VariableType.FEATURE) if not feature: continue column_type = feature.get('column_type') if not self.columns_by_type.get(column_type): self.columns_by_type[column_type] = [] self.columns_by_type[column_type].append(feature['uuid']) @property def action_type(self): return self.action['action_type'] @property def axis(self): return self.action['axis'] def execute(self, df, **kwargs): self.hydrate_action() self.action['action_code'] = replace_true_false(self.action['action_code']) if df.empty: return df if self.action_type in [ActionType.FILTER, ActionType.ADD]: df_transformed = self.transform(df) else: df_transformed = df if self.action_type == ActionType.GROUP: df_output = self.groupby(df, self.action) elif self.action_type == ActionType.JOIN: df_to_join = kwargs.get('df_to_join') df_output = self.join(df, df_to_join, self.action) else: column_types = {} for column_type, cols in self.columns_by_type.items(): for col in cols: column_types[col] = column_type df_output = FUNCTION_MAPPING[self.axis][self.action_type]( df_transformed, self.action, column_types=column_types, original_df=df, ) if self.action_type == ActionType.FILTER: return df.loc[df_output.index][df_output.columns] elif self.action_type == ActionType.ADD: output_cols = [f['uuid'] for f in self.action['outputs']] df[output_cols] = df_output[output_cols] return df else: return df_output def groupby(self, df, action): def __transform_partition(pdf, actions): for action in actions: pdf = BaseAction(action).execute(pdf) return pdf groupby_columns = action['action_arguments'] return df.groupby(groupby_columns).apply(lambda x: __transform_partition(x, action['child_actions'])) def hydrate_action(self): for k, v in self.action['action_variables'].items(): """ k: 1, 1_1 v: { 'type': 'feature', 'id': 1, 
'feature': { 'uuid': 'mage', }, } """ if not v: continue if self.action.get('action_code'): self.action['action_code'] = interpolate(self.action['action_code'], k, v) if self.action.get('action_arguments'): self.action['action_arguments'] = [interpolate( args_text, k, v, ) for args_text in self.action['action_arguments']] if self.action.get('action_options'): action_options_json = json.dumps(self.action['action_options']) self.action['action_options'] = json.loads(interpolate(action_options_json, k, v)) def join(self, df, df_to_join, action): action_options = action['action_options'] left_on = action_options['left_on'] right_on = action_options['right_on'] for i in range(len(left_on)): col1, col2 = left_on[i], right_on[i] if df[col1].dtype != df_to_join[col2].dtype: df[col1] = drop_na(df[col1]).astype(str) df_to_join[col2] = drop_na(df_to_join[col2]).astype(str) if action.get('outputs') is not None: feature_rename_mapping = { f['source_feature']['uuid']:f['uuid'] for f in action['outputs'] if f.get('source_feature') is not None } df_to_join_renamed = df_to_join.rename(columns=feature_rename_mapping) right_on = [feature_rename_mapping.get(key, key) for key in right_on] else: df_to_join_renamed = df_to_join how = action_options.get('how', 'left') df_merged = df.merge(df_to_join_renamed, left_on=left_on, right_on=right_on, how=how) drop_columns = action_options.get('drop_columns', []) rename_columns = action_options.get('rename_columns', {}) return df_merged.drop(columns=drop_columns).rename(columns=rename_columns) def transform(self, df): df_copy = df.copy() current_columns = df_copy.columns for column_type, original_columns in self.columns_by_type.items(): cols = [col for col in original_columns if col in current_columns] if len(cols) == 0: continue build_pipeline = COLUMN_TYPE_PIPELINE_MAPPING.get(column_type) if not build_pipeline: continue df_copy[cols] = build_pipeline().fit_transform(df_copy[cols]) return df_copy mage_ai/data_cleaner/transformer_actions/action_code.py METASEP from data_cleaner.transformer_actions.constants import Operator import re ACTION_CODE_CONDITION_PATTERN = re.compile( r'([^\s()]+) ([!=<>]+|(?:contains)|(?:not contains)) ([^\s()]+)' ) ORIGINAL_COLUMN_PREFIX = 'orig_' TRANSFORMED_COLUMN_PREFIX = 'tf_' def __query_mutate_null_type(match, dtype): condition = [''] column_name, operator, _ = match.groups() column_name = f'{ORIGINAL_COLUMN_PREFIX}{column_name}' if operator == '==': condition.append(f'({column_name}.isna()') if dtype == bool: condition.append(f' | {column_name} == \'\'') elif dtype == str: condition.append(f' | {column_name}.str.len() == 0') condition.append(f')') else: condition.append(f'({column_name}.notna()') if dtype == bool: condition.append(f' & {column_name} != \'\'') elif dtype == str: condition.append(f' & {column_name}.str.len() >= 1') condition.append(f')') return ''.join(condition) def __query_mutate_contains_op(match): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' value = value.strip('\'').strip('\"') if operator == Operator.CONTAINS: condition = f'({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' else: condition = f'~({column_name}.notna() & {column_name}.str.contains(\'{value}\'))' return condition def __query_mutate_default_case(match, column_set): column_name, operator, value = match.groups() column_name = f'{TRANSFORMED_COLUMN_PREFIX}{column_name}' if value in column_set: # if comparison is with another column, prefix value with column identifier value = 
f'{TRANSFORMED_COLUMN_PREFIX}{value}' return f'{column_name} {operator} {value}' def __get_column_type(df, cache, column_name): dtype = cache.get(column_name, None) if dtype is None: dropped_na = df[column_name].dropna() dropped_na = dropped_na[~dropped_na.isin([''])] dtype = type(dropped_na.iloc[0]) if len(dropped_na.index) >= 1 else object cache[column_name] = dtype return dtype def query_with_action_code(df, action_code, kwargs): transformed_types, original_types = {}, {} original_df, original_merged = kwargs.get('original_df', None), False reconstructed_code = [] queried_df = df.copy().add_prefix(TRANSFORMED_COLUMN_PREFIX) column_set = set(df.columns) prev_end = 0 for match in ACTION_CODE_CONDITION_PATTERN.finditer(action_code): column_name, operator, value = match.groups() reconstructed_code.append(action_code[prev_end: match.start()]) prev_end = match.end() if operator == Operator.CONTAINS or operator == Operator.NOT_CONTAINS: transformed_dtype = __get_column_type(df, transformed_types, column_name) if transformed_dtype != str: raise TypeError( f'\'{operator}\' can only be used on string columns, {transformed_dtype}' ) reconstructed_code.append(__query_mutate_contains_op(match)) elif (operator == Operator.EQUALS or operator == Operator.NOT_EQUALS) and value == 'null': if original_df is None: raise Exception( 'Null value queries require original dataframe as keyword argument' ) elif not original_merged: queried_df = queried_df.join(original_df.add_prefix(ORIGINAL_COLUMN_PREFIX)) original_merged = True original_dtype = __get_column_type(original_df, original_types, column_name) reconstructed_code.append(__query_mutate_null_type(match, original_dtype)) else: reconstructed_code.append(__query_mutate_default_case(match, column_set)) reconstructed_code.append(action_code[prev_end:]) action_code = ''.join(reconstructed_code) queried_df = queried_df.query(action_code).rename( lambda x: x[len(TRANSFORMED_COLUMN_PREFIX):], axis='columns' ) return queried_df[df.columns] mage_ai/data_cleaner/transformer_actions/__init__.py METASEP mage_ai/data_cleaner/tests/__init__.py METASEP mage_ai/data_cleaner/statistics/calculator.py METASEP from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.column_type_detector import ( DATETIME, NUMBER, NUMBER_TYPES, NUMBER_WITH_DECIMALS, get_mismatched_row_count, ) import math import numpy as np import pandas as pd import traceback VALUE_COUNT_LIMIT = 255 def increment(metric, tags): pass class timer(object): """ with timer('metric.metric', tags={ 'key': 'value' }): function() """ def __init__(self, metric, tags={}): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class StatisticsCalculator(): def __init__( self, # s3_client, # object_key_prefix, # feature_set_version, column_types, **kwargs, ): self.column_types = column_types @property def data_tags(self): return dict() def process(self, df): return self.calculate_statistics_overview(df) def calculate_statistics_overview(self, df): increment( 'lambda.transformer_actions.calculate_statistics_overview.start', self.data_tags, ) with timer( 'lambda.transformer_actions.calculate_statistics_overview.time', self.data_tags): data = dict(count=len(df.index)) arr_args_1 = [df[col] for col in df.columns], arr_args_2 = [col for col in df.columns], dicts = run_parallel(self.statistics_overview, arr_args_1, arr_args_2) for d in dicts: data.update(d) # object_key = s3_paths.path_statistics_overview(self.object_key_prefix) # 
s3_data.upload_json_sorted(self.s3_client, object_key, data) increment( 'lambda.transformer_actions.calculate_statistics_overview.success', self.data_tags, ) return data def statistics_overview(self, series, col): try: return self.__statistics_overview(series, col) except Exception as err: increment( 'lambda.transformer_actions.calculate_statistics_overview.column.failed', merge_dict(self.data_tags, { 'col': col, 'error': err.__class__.__name__, }), ) traceback.print_exc() return {} def __statistics_overview(self, series, col): # The following regex based replace has high overheads # series = series.replace(r'^\s*$', np.nan, regex=True) series_cleaned = series.map(lambda x: x if (not isinstance(x, str) or (len(x) > 0 and not x.isspace())) else np.nan) df_value_counts = series_cleaned.value_counts(dropna=False) df = df_value_counts.reset_index() df.columns = [col, 'count'] df_top_value_counts = df if df.shape[0] > VALUE_COUNT_LIMIT: df_top_value_counts = df.head(VALUE_COUNT_LIMIT) # TODO: remove duplicate data for distinct values # object_key_distinct_values = s3_paths.path_distinct_values_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_distinct_values, columns=[col]) # object_key_statistics = s3_paths.path_statistics_by_column(self.object_key_prefix, col) # s3_data.upload_dataframe(self.s3_client, df_top_value_counts, object_key_statistics) # features = self.feature_set_version['features'] # feature = find(lambda x: x['uuid'] == col, features) # if feature and feature.get('transformed'): # return {} column_type = self.column_types.get(col) series_non_null = series_cleaned.dropna() if column_type == NUMBER: series_non_null = series_non_null.astype(float).astype(int) elif column_type == NUMBER_WITH_DECIMALS: series_non_null = series_non_null.astype(float) count_unique = len(df_value_counts.index) data = { f'{col}/count': series_non_null.size, f'{col}/count_distinct': count_unique - 1 if np.nan in df_value_counts else count_unique, f'{col}/null_value_rate': 0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, f'{col}/null_value_count': series_cleaned.isnull().sum(), } if len(series_non_null) == 0: return data dates = None if column_type in NUMBER_TYPES: data[f'{col}/average'] = series_non_null.sum() / len(series_non_null) data[f'{col}/max'] = series_non_null.max() data[f'{col}/median'] = series_non_null.quantile(0.5) data[f'{col}/min'] = series_non_null.min() data[f'{col}/sum'] = series_non_null.sum() elif column_type == DATETIME: dates = pd.to_datetime(series_non_null, utc=True, errors='coerce').dropna() data[f'{col}/max'] = dates.max().isoformat() data[f'{col}/median'] = dates.sort_values().iloc[math.floor(len(dates) / 2)].isoformat() data[f'{col}/min'] = dates.min().isoformat() if column_type not in NUMBER_TYPES: if dates is not None: value_counts = dates.value_counts() else: value_counts = series_non_null.value_counts() mode = value_counts.index[0] if column_type == DATETIME: mode = mode.isoformat() data[f'{col}/mode'] = mode # Detect mismatched formats for some column types data[f'{col}/mismatched_count'] = get_mismatched_row_count(series, column_type) return data mage_ai/data_cleaner/statistics/__init__.py METASEP mage_ai/data_cleaner/shared/utils.py METASEP from data_cleaner.column_type_detector import ( NUMBER, NUMBER_WITH_DECIMALS, ) import numpy as np def clean_series(series, column_type, dropna=True): series_cleaned = series.map( lambda x: x if (not isinstance(x, str) or (len(x) > 0 
and not x.isspace())) else np.nan, ) if dropna: series_cleaned = series_cleaned.dropna() if column_type == NUMBER: try: series_cleaned = series_cleaned.astype(float).astype(int) except ValueError: series_cleaned = series_cleaned.astype(float) elif column_type == NUMBER_WITH_DECIMALS: series_cleaned = series_cleaned.astype(float) return series_cleaned mage_ai/data_cleaner/shared/multi.py METASEP from concurrent.futures import ThreadPoolExecutor from threading import Thread MAX_WORKERS = 16 def start_thread(target, **kwargs): thread = Thread( target=target, kwargs=kwargs, ) thread.start() return thread def parallelize(func, arr): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, arr) def parallelize_multiple_args(func, arr_args): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *zip(*arr_args)) def run_parallel_threads(list_of_funcs_and_args_or_kwargs): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: for func, args in list_of_funcs_and_args_or_kwargs: pool.submit(func, *args) def run_parallel(func, arr_args_1, arr_args_2): with ThreadPoolExecutor(max_workers=MAX_WORKERS) as pool: return pool.map(func, *arr_args_1, *arr_args_2) mage_ai/data_cleaner/shared/hash.py METASEP from functools import reduce import math import re def dig(obj_arg, arr_or_string): if type(arr_or_string) is str: arr_or_string = arr_or_string.split('.') arr = list(map(str.strip, arr_or_string)) def _build(obj, key): tup = re.split(r'\[(\d+)\]$', key) if len(tup) >= 2: key, index = filter(lambda x: x, tup) if key and index: return obj[key][int(index)] elif index: return obj[int(index)] elif obj: return obj.get(key) else: return obj return reduce(_build, arr, obj_arg) def flatten(input_data): final_data = {} for k1, v1 in input_data.items(): if type(v1) is dict: for k2, v2 in v1.items(): if type(v2) is dict: for k3, v3 in v2.items(): final_data[f'{k1}_{k2}_{k3}'] = v3 else: final_data[f'{k1}_{k2}'] = v2 else: final_data[k1] = v1 return final_data def ignore_keys(d, keys): d_keys = d.keys() d2 = d.copy() for key in keys: if key in d_keys: d2.pop(key) return d2 def ignore_keys_with_blank_values(d): d2 = d.copy() for key, value in d.items(): if not value: d2.pop(key) return d2 def extract(d, keys): def _build(obj, key): val = d.get(key, None) if val is not None: obj[key] = val return obj return reduce(_build, keys, {}) def extract_arrays(input_data): arr = [] for k, v in input_data.items(): if type(v) is list: arr.append(v) return arr def group_by(func, arr): def _build(obj, item): val = func(item) if not obj.get(val): obj[val] = [] obj[val].append(item) return obj return reduce(_build, arr, {}) def index_by(func, arr): obj = {} for item in arr: key = func(item) obj[key] = item return obj def merge_dict(a, b): c = a.copy() c.update(b) return c def replace_dict_nan_value(d): def _replace_nan_value(v): if type(v) == float and math.isnan(v): return None return v return {k: _replace_nan_value(v) for k, v in d.items()} mage_ai/data_cleaner/shared/array.py METASEP import random def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): yield iterable[ndx:min(ndx + n, l)] def difference(li1, li2): li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2] return li_dif def flatten(arr): return [item for sublist in arr for item in sublist] def find(condition, arr, map=None): try: return next(map(x) if map else x for x in arr if condition(x)) except StopIteration: return None def sample(arr): return arr[random.randrange(0, len(arr))] def 
subtract(arr1, arr2): return [i for i in arr1 if i not in arr2] mage_ai/data_cleaner/shared/__init__.py METASEP mage_ai/data_cleaner/pipelines/base.py METASEP from data_cleaner.cleaning_rules.remove_columns_with_high_empty_rate \ import RemoveColumnsWithHighEmptyRate from data_cleaner.cleaning_rules.remove_columns_with_single_value \ import RemoveColumnsWithSingleValue from data_cleaner.cleaning_rules.remove_duplicate_rows \ import RemoveDuplicateRows from data_cleaner.transformer_actions.base import BaseAction DEFAULT_RULES = [ RemoveColumnsWithHighEmptyRate, RemoveColumnsWithSingleValue, RemoveDuplicateRows, ] class BasePipeline(): def __init__(self, actions=[]): self.actions = actions self.rules = DEFAULT_RULES def create_actions(self, df, column_types, statistics): all_suggestions = [] for rule in self.rules: suggestions = rule(df, column_types, statistics).evaluate() if suggestions: all_suggestions += suggestions self.actions = all_suggestions return all_suggestions def transform(self, df): if len(self.actions) == 0: print('Pipeline is empty.') return df df_transformed = df for action in self.actions: df_transformed = BaseAction(action['action_payload']).execute(df_transformed) return df_transformed mage_ai/data_cleaner/pipelines/__init__.py METASEP mage_ai/data_cleaner/cleaning_rules/unit_conversion.py METASEP mage_ai/data_cleaner/cleaning_rules/type_conversion.py METASEP mage_ai/data_cleaner/cleaning_rules/remove_outliers.py METASEP mage_ai/data_cleaner/cleaning_rules/remove_duplicate_rows.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveDuplicateRows(BaseRule): def evaluate(self): df_dedupe = self.df.drop_duplicates() duplicate_row_count = self.df.shape[0] - df_dedupe.shape[0] suggestions = [] if duplicate_row_count > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove duplicate rows', f'There\'re {duplicate_row_count} duplicate rows in the dataset. 
'\ 'Suggest to remove them.', ActionType.DROP_DUPLICATE, action_arguments=[], axis=Axis.ROW, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_columns_with_single_value.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithSingleValue(BaseRule): # Check statistic [feature_uuid]/count_distinct def evaluate(self): columns_with_single_value = [] for c in self.df_columns: if f'{c}/count_distinct' not in self.statistics: continue feature_count_distinct = self.statistics[f'{c}/count_distinct'] if feature_count_distinct == 1: columns_with_single_value.append(c) suggestions = [] suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with single value', f'The following columns have single value in all rows: {columns_with_single_value}.'\ ' Suggest to remove them.', ActionType.REMOVE, action_arguments=columns_with_single_value, axis=Axis.COLUMN, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_columns_with_high_empty_rate.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.transformer_actions.constants import ( ActionType, Axis, ) class RemoveColumnsWithHighEmptyRate(BaseRule): MISSING_RATE_THRESHOLD = 0.8 def evaluate(self): columns_with_missing_values = [] columns_with_no_values = [] for c in self.df_columns: if self.statistics.get(f'{c}/count') == 0: columns_with_no_values.append(c) elif f'{c}/null_value_rate' in self.statistics: null_value_rate = self.statistics[f'{c}/null_value_rate'] if null_value_rate >= self.MISSING_RATE_THRESHOLD: columns_with_missing_values.append(c) suggestions = [] if len(columns_with_no_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with no values', f'The following columns have no values: {columns_with_no_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_no_values, axis=Axis.COLUMN, )) if len(columns_with_missing_values) > 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove columns with high empty rate', f'The following columns have high empty rate: {columns_with_missing_values}.'\ ' Removing them may increase your data quality.', ActionType.REMOVE, action_arguments=columns_with_missing_values, axis=Axis.COLUMN, )) return suggestions mage_ai/data_cleaner/cleaning_rules/remove_collinear_columns.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.column_type_detector import NUMBER_TYPES from data_cleaner.transformer_actions.constants import ActionType, Axis import numpy as np class RemoveCollinearColumns(BaseRule): EPSILON = 1e-12 MIN_ENTRIES = 3 ROW_SAMPLE_SIZE = 300 VIF_UB = 5 def __init__(self, df, column_types, statistics): super().__init__(df, column_types, statistics) self.numeric_df, self.numeric_columns = self.filter_numeric_types() self.numeric_indices = np.arange(len(self.numeric_df)) def evaluate(self): suggestions = [] if self.numeric_df.empty or len(self.numeric_df) < self.MIN_ENTRIES: return suggestions collinear_columns = [] for column in self.numeric_columns[:-1]: variance_inflation_factor = self.get_variance_inflation_factor(column) if variance_inflation_factor > self.VIF_UB: collinear_columns.append(column) self.numeric_df.drop(column, axis=1, inplace=True) if len(collinear_columns) != len(self.numeric_columns)-1: # check the final column if and only if there are other columns to compare it to column = 
self.numeric_columns[-1] variance_inflation_factor = self.get_variance_inflation_factor(column) if variance_inflation_factor > self.VIF_UB: collinear_columns.append(column) if len(collinear_columns) != 0: suggestions.append(self._build_transformer_action_suggestion( 'Remove collinear columns', 'The following columns are strongly correlated ' f'with other columns in the dataset: {collinear_columns}. ' 'Removing these columns may increase data quality ' 'by removing redundant and closely related data.', ActionType.REMOVE, action_arguments=collinear_columns, axis=Axis.COLUMN, )) return suggestions def filter_numeric_types(self): cleaned_df = self.df.replace('^\s*$', np.nan, regex=True) numeric_columns = [] for column in self.df_columns: if self.column_types[column] in NUMBER_TYPES: cleaned_df[column] = cleaned_df[column].astype(float) numeric_columns.append(column) else: cleaned_df.drop(column, axis=1, inplace=True) cleaned_df = cleaned_df.dropna(axis=0) return cleaned_df, numeric_columns def get_variance_inflation_factor(self, column): """ Variance Inflation Factor = 1 / (1 - <coefficient of determination on column k>) Measures increase in regression model variance due to collinearity => column k is multicollinear with others if model predicting its value has this variance inflation greater than some amount """ if self.numeric_df.empty: raise RuntimeError('No other columns to compare \'{column}\' against') if len(self.numeric_df) > self.ROW_SAMPLE_SIZE: sample = self.numeric_df.sample(self.ROW_SAMPLE_SIZE) else: sample = self.numeric_df responses = sample[column].to_numpy() predictors = sample.drop(column, axis=1).to_numpy() params, _, _, _ = np.linalg.lstsq(predictors, responses, rcond=None) predictions = predictors @ params sum_sq_model = np.sum(predictions * predictions) sum_sq_to = np.sum(responses * responses) r_sq = sum_sq_model / sum_sq_to return 1 / (1 - r_sq + self.EPSILON) mage_ai/data_cleaner/cleaning_rules/reformat_values.py METASEP mage_ai/data_cleaner/cleaning_rules/impute_values.py METASEP mage_ai/data_cleaner/cleaning_rules/fix_encoding.py METASEP mage_ai/data_cleaner/cleaning_rules/clean_column_names.py METASEP from data_cleaner.cleaning_rules.base import BaseRule from data_cleaner.column_type_detector import REGEX_NUMBER from data_cleaner.transformer_actions.constants import ActionType from keyword import iskeyword import re class CleanColumnNames(BaseRule): INVALID_COLUMN_CHARS = re.compile(r'([^a-z\_0-9])') UPPERCASE_PATTERN = re.compile(r'[A-Z]') def evaluate(self): """ Rule: 1. If column name contains an invalid character, suggest cleaning (remove all characters) 2. If column name is a reserved python keyword, suggest cleaning (pad with symbols) 3. If column is of mixedcase, suggest cleaning (convert to lowercase) 4. If column contains only numbers, suggest cleaning (pad with letters) 5. If column contains dashes, convert to underscore """ matches = [] for column in self.df_columns: if self.INVALID_COLUMN_CHARS.search(column) != None: matches.append(column) elif REGEX_NUMBER.search(column) != None: matches.append(column) else: column = column.lower().strip() if column == 'true' or column == 'false': matches.append(column) elif iskeyword(column): matches.append(column) suggestions = [] if len(matches) != 0: suggestions.append(self._build_transformer_action_suggestion( 'Clean dirty column names', 'The following columns have unclean naming conventions: ' f'{matches}. 
' 'Making these names lowercase and alphanumeric may improve' 'ease of dataset access and reduce security risks.', action_type=ActionType.CLEAN_COLUMN_NAME, action_arguments=matches, axis='column' )) return suggestions mage_ai/data_cleaner/cleaning_rules/base.py METASEP class BaseRule: def __init__(self, df, column_types, statistics): self.df = df self.df_columns = df.columns.tolist() self.column_types = column_types self.statistics = statistics def evaluate(self): """Evaluate data cleaning rule and generate suggested actions Returns ------- A list of suggested actions """ return [] def _build_transformer_action_suggestion( self, title, message, action_type, action_arguments=[], action_code='', action_options={}, action_variables={}, axis='column', outputs=[], ): return dict( title=title, message=message, action_payload=dict( action_type=action_type, action_arguments=action_arguments, action_code=action_code, action_options=action_options, action_variables=action_variables, axis=axis, outputs=outputs, ), ) mage_ai/data_cleaner/cleaning_rules/__init__.py METASEP mage_ai/data_cleaner/analysis/constants.py METASEP CHART_TYPE_BAR_HORIZONTAL = 'bar_horizontal' CHART_TYPE_LINE_CHART = 'line_chart' CHART_TYPE_HISTOGRAM = 'histogram' LABEL_TYPE_RANGE = 'range' DATA_KEY_CHARTS = 'charts' DATA_KEY_CORRELATION = 'correlations' DATA_KEY_OVERVIEW = 'overview' DATA_KEY_TIME_SERIES = 'time_series' DATA_KEYS = [ DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_OVERVIEW, DATA_KEY_TIME_SERIES, ] mage_ai/data_cleaner/analysis/charts.py METASEP from data_cleaner.analysis.constants import ( CHART_TYPE_BAR_HORIZONTAL, CHART_TYPE_LINE_CHART, CHART_TYPE_HISTOGRAM, DATA_KEY_TIME_SERIES, LABEL_TYPE_RANGE, ) from data_cleaner.shared.utils import clean_series from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) import dateutil.parser import math import numpy as np import pandas as pd DD_KEY = 'lambda.analysis_charts' BUCKETS = 40 TIME_SERIES_BUCKETS = 40 def increment(metric, tags={}): pass def build_buckets(min_value, max_value, max_buckets, column_type): diff = max_value - min_value total_interval = 1 + diff bucket_interval = total_interval / max_buckets number_of_buckets = max_buckets is_integer = False parts = str(diff).split('.') if len(parts) == 1: is_integer = True else: is_integer = int(parts[1]) == 0 if NUMBER == column_type and total_interval <= max_buckets and is_integer: number_of_buckets = int(total_interval) bucket_interval = 1 elif bucket_interval > 1: bucket_interval = math.ceil(bucket_interval) else: bucket_interval = round(bucket_interval * 100, 1) / 100 buckets = [] for i in range(number_of_buckets): min_v = min_value + (i * bucket_interval) max_v = min_value + ((i + 1) * bucket_interval) buckets.append(dict( max_value=max_v, min_value=min_v, values=[], )) return buckets, bucket_interval def build_histogram_data(col1, series, column_type): increment(f'{DD_KEY}.build_histogram_data.start', dict(feature_uuid=col1)) max_value = series.max() min_value = series.min() buckets, bucket_interval = build_buckets(min_value, max_value, BUCKETS, column_type) if bucket_interval == 0: return for value in series.values: index = math.floor((value - min_value) / bucket_interval) if value == max_value: index = len(buckets) - 1 buckets[index]['values'].append(value) x = [] y = [] for bucket in buckets: x.append(dict( max=bucket['max_value'], min=bucket['min_value'], )) y.append(dict(value=len(bucket['values']))) 
increment(f'{DD_KEY}.build_histogram_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_HISTOGRAM, x=x, x_metadata=dict( label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_correlation_data(df, col1, features): increment(f'{DD_KEY}.build_correlation_data.start', dict(feature_uuid=col1)) x = [] y = [] df_copy = df.copy() for feature in features: col2 = feature['uuid'] column_type = feature['column_type'] series = df_copy[col2] df_copy[col2] = clean_series(series, column_type, dropna=False) corr = df_copy.corr() for feature in features: col2 = feature['uuid'] if col1 != col2: value = corr[col1].get(col2, None) if value is not None: x.append(dict(label=col2)) y.append(dict(value=value)) increment(f'{DD_KEY}.build_correlation_data.succeeded', dict(feature_uuid=col1)) return dict( type=CHART_TYPE_BAR_HORIZONTAL, x=x, y=y, ) def build_time_series_data(df, feature, datetime_column, column_type): col1 = feature['uuid'] column_type = feature['column_type'] tags = dict( column_type=column_type, datetime_column=datetime_column, feature_uuid=col1, ) increment(f'{DD_KEY}.build_time_series_data.start', tags) # print(feature, datetime_column) datetimes = clean_series(df[datetime_column], DATETIME) if datetimes.size <= 1: return min_value_datetime = dateutil.parser.parse(datetimes.min()).timestamp() max_value_datetime = dateutil.parser.parse(datetimes.max()).timestamp() buckets, bucket_interval = build_buckets( min_value_datetime, max_value_datetime, TIME_SERIES_BUCKETS, column_type, ) x = [] y = [] df_copy = df.copy() df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] series = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )][col1] x.append(dict( max=max_value, min=min_value, )) series_cleaned = clean_series(series, column_type, dropna=False) df_value_counts = series_cleaned.value_counts(dropna=False) series_non_null = series_cleaned.dropna() count_unique = len(df_value_counts.index) y_data = dict( count=series_non_null.size, count_distinct=count_unique - 1 if np.nan in df_value_counts else count_unique, null_value_rate=0 if series_cleaned.size == 0 else series_cleaned.isnull().sum() / series_cleaned.size, ) if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: y_data.update(dict( average=series_non_null.sum() / len(series_non_null), max=series_non_null.max(), median=series_non_null.quantile(0.5), min=series_non_null.min(), sum=series_non_null.sum(), )) elif column_type in [CATEGORY, CATEGORY_HIGH_CARDINALITY, TRUE_OR_FALSE]: value_counts = series_non_null.value_counts() if len(value_counts.index): value_counts_top = value_counts.sort_values(ascending=False).iloc[:12] mode = value_counts_top.index[0] y_data.update(dict( mode=mode, value_counts=value_counts_top.to_dict(), )) y.append(y_data) increment(f'{DD_KEY}.build_time_series_data.succeeded', tags) return dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, ) def build_overview_data(df, datetime_features): increment(f'{DD_KEY}.build_overview_data.start') time_series = [] df_copy = df.copy() for feature in datetime_features: column_type = feature['column_type'] datetime_column = feature['uuid'] tags = dict(datetime_column=datetime_column) increment(f'{DD_KEY}.build_overview_time_series.start', tags) if clean_series(df_copy[datetime_column], DATETIME).size <= 1: continue 
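        # Convert the datetime column to unix timestamps so that rows can be
        # grouped into the fixed-width time buckets built below.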
df_copy[datetime_column] = \ pd.to_datetime(df[datetime_column]).apply(lambda x: x if pd.isnull(x) else x.timestamp()) min_value1 = df_copy[datetime_column].min() max_value1 = df_copy[datetime_column].max() buckets, bucket_interval = build_buckets(min_value1, max_value1, TIME_SERIES_BUCKETS, column_type) x = [] y = [] for bucket in buckets: max_value = bucket['max_value'] min_value = bucket['min_value'] df_filtered = df_copy[( df_copy[datetime_column] >= min_value ) & ( df_copy[datetime_column] < max_value )] x.append(dict( max=max_value, min=min_value, )) y.append(dict( count=len(df_filtered.index), )) time_series.append(dict( type=CHART_TYPE_LINE_CHART, x=x, x_metadata=dict( label=datetime_column, label_type=LABEL_TYPE_RANGE, ), y=y, )) increment(f'{DD_KEY}.build_overview_time_series.succeeded', tags) increment(f'{DD_KEY}.build_overview_data.succeeded') return { DATA_KEY_TIME_SERIES: time_series, } mage_ai/data_cleaner/analysis/calculator.py METASEP from data_cleaner.analysis import charts from data_cleaner.analysis.constants import ( DATA_KEY_CHARTS, DATA_KEY_CORRELATION, DATA_KEY_TIME_SERIES, ) from data_cleaner.shared.utils import clean_series from data_cleaner.shared.hash import merge_dict from data_cleaner.shared.multi import run_parallel from data_cleaner.transformer_actions import constants from data_cleaner.column_type_detector import ( CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ) DD_KEY = 'lambda.analysis_calculator' def increment(metric, tags={}): pass class AnalysisCalculator(): def __init__( self, df, column_types, **kwargs, ): self.df = df self.column_types = column_types self.features = [{'uuid': col, 'column_type': column_types.get(col)} for col in df.columns] def process(self, df): increment(f'{DD_KEY}.process.start', self.tags) df_columns = df.columns features_to_use = self.features datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] arr_args_1 = [df for _ in features_to_use], arr_args_2 = features_to_use, data_for_columns = [d for d in run_parallel(self.calculate_column, arr_args_1, arr_args_2)] overview = charts.build_overview_data( df, datetime_features_to_use, ) correlation_overview = [] for d in data_for_columns: corr = d.get(DATA_KEY_CORRELATION) if corr: correlation_overview.append({ 'feature': d['feature'], DATA_KEY_CORRELATION: corr, }) increment(f'{DD_KEY}.process.succeeded', self.tags) return data_for_columns, merge_dict(overview, { DATA_KEY_CORRELATION: correlation_overview, }) @property def features_by_uuid(self): data = {} for feature in self.features: data[feature['uuid']] = feature return data @property def datetime_features(self): return [f for f in self.features if f['column_type'] == DATETIME] @property def tags(self): return dict() def calculate_column(self, df, feature): df_columns = df.columns features_to_use = [f for f in self.features if f['uuid'] in df_columns] datetime_features_to_use = [f for f in self.datetime_features if f['uuid'] in df_columns] col = feature['uuid'] column_type = feature['column_type'] tags = merge_dict(self.tags, dict(column_type=column_type, feature_uuid=col)) increment(f'{DD_KEY}.calculate_column.start', tags) series = df[col] series_cleaned = clean_series(series, column_type) chart_data = [] correlation = [] time_series = [] if column_type in [NUMBER, NUMBER_WITH_DECIMALS]: histogram_data = charts.build_histogram_data(col, series_cleaned, column_type) if histogram_data: chart_data.append(histogram_data) 
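            # Numeric columns also get pairwise correlation data against the
            # other usable features.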
correlation.append(charts.build_correlation_data(df, col, features_to_use)) if column_type in [ CATEGORY, CATEGORY_HIGH_CARDINALITY, NUMBER, NUMBER_WITH_DECIMALS, TRUE_OR_FALSE, ]: time_series = [] for f in datetime_features_to_use: time_series_chart = charts.build_time_series_data(df, feature, f['uuid'], column_type) if time_series_chart: time_series.append(time_series_chart) increment(f'{DD_KEY}.calculate_column.succeeded', tags) return { 'feature': feature, DATA_KEY_CHARTS: chart_data, DATA_KEY_CORRELATION: correlation, DATA_KEY_TIME_SERIES: time_series, } mage_ai/data_cleaner/analysis/__init__.py METASEP cleaning/__init__.py METASEP mage_ai/tests/base_test.py METASEP import unittest class TestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass mage_ai/tests/__init__.py METASEP mage_ai/data_cleaner/data_cleaner.py METASEP from data_cleaner import column_type_detector from data_cleaner.analysis.calculator import AnalysisCalculator from data_cleaner.pipelines.base import BasePipeline from data_cleaner.shared.hash import merge_dict from data_cleaner.statistics.calculator import StatisticsCalculator def clean(df): cleaner = DataCleaner() return cleaner.clean(df) class DataCleaner(): def analyze(self, df): """ Analyze a dataframe 1. Detect column types 2. Calculate statisitics 3. Calculate analysis """ column_types = column_type_detector.infer_column_types(df) statistics = StatisticsCalculator(column_types).process(df) analysis = AnalysisCalculator(df, column_types).process(df) return dict( analysis=analysis, column_types=column_types, statistics=statistics, ) def clean(self, df): df_stats = self.analyze(df) pipeline = BasePipeline() suggested_actions = pipeline.create_actions( df, df_stats['column_types'], df_stats['statistics'], ) df_cleaned = pipeline.transform(df) return merge_dict(df_stats, dict( df_cleaned=df_cleaned, suggested_actions=suggested_actions, )) mage_ai/data_cleaner/column_type_detector.py METASEP from data_cleaner.shared.array import subtract import numpy as np import re import warnings DATETIME_MATCHES_THRESHOLD = 0.5 MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES = 40 CATEGORY = 'category' CATEGORY_HIGH_CARDINALITY = 'category_high_cardinality' DATETIME = 'datetime' EMAIL = 'email' NUMBER = 'number' NUMBER_WITH_DECIMALS = 'number_with_decimals' PHONE_NUMBER = 'phone_number' TEXT = 'text' TRUE_OR_FALSE = 'true_or_false' ZIP_CODE = 'zip_code' NUMBER_TYPES = [NUMBER, NUMBER_WITH_DECIMALS] STRING_TYPES = [EMAIL, PHONE_NUMBER, TEXT, ZIP_CODE] COLUMN_TYPES = [ CATEGORY, CATEGORY_HIGH_CARDINALITY, DATETIME, EMAIL, NUMBER, NUMBER_WITH_DECIMALS, PHONE_NUMBER, TEXT, TRUE_OR_FALSE, ZIP_CODE, ] REGEX_DATETIME_PATTERN = r'^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}$|^[\d]{2,4}-[\d]{1,2}-[\d]{1,2}[Tt ]{1}[\d]{1,2}:[\d]{1,2}[:]{0,1}[\d]{1,2}[\.]{0,1}[\d]*|^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$|^\d{1,4}[-\/]{1}\d{1,2}[-\/]{1}\d{1,4}$|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(\d{1,2})[\s,]+(\d{2,4})' REGEX_EMAIL_PATTERN = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$" REGEX_EMAIL = re.compile(REGEX_EMAIL_PATTERN) REGEX_INTEGER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+$' REGEX_INTEGER = re.compile(REGEX_INTEGER_PATTERN) REGEX_NUMBER_PATTERN = r'^[\-]{0,1}[\$]{0,1}[0-9,]+\.[0-9]*%{0,1}$|^[\-]{0,1}[\$]{0,1}[0-9,]+%{0,1}$' REGEX_NUMBER = re.compile(REGEX_NUMBER_PATTERN) REGEX_PHONE_NUMBER_PATTERN = r'^\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. 
]*(\d{4})(?: *x(\d+))?\s*$' REGEX_PHONE_NUMBER = re.compile(REGEX_PHONE_NUMBER_PATTERN) REGEX_ZIP_CODE_PATTERN = r'^\d{3,5}(?:[-\s]\d{4})?$' REGEX_ZIP_CODE = re.compile(REGEX_ZIP_CODE_PATTERN) def get_mismatched_row_count(series, column_type): mismatched_rows = 0 if column_type == EMAIL: mismatched_rows = len( series[~series.str.contains(REGEX_EMAIL)].index, ) elif column_type == PHONE_NUMBER: mismatched_rows = len( series[~series.str.contains(REGEX_PHONE_NUMBER)].index, ) elif column_type == ZIP_CODE: mismatched_rows = len( series[~series.str.contains(REGEX_ZIP_CODE)].index, ) return mismatched_rows def infer_column_types(df, **kwargs): binary_feature_names = [] category_feature_names = [] datetime_feature_names = [] email_features = [] float_feature_names = [] integer_feature_names = [] non_number_feature_names = [] phone_number_feature_names = [] text_feature_names = [] zip_code_feature_names = [] for idx, col_type in enumerate(df.dtypes): col_name = df.columns[idx] if 'datetime64' in str(col_type): datetime_feature_names.append(col_name) elif col_type == 'object': df_sub = df[col_name].copy() df_sub = df_sub.replace('^\s+$', np.nan, regex=True) df_sub = df_sub.dropna() df_sub = df_sub.apply(lambda x: x.strip() if type(x) is str else x) if df_sub.empty: non_number_feature_names.append(col_name) else: first_item = df_sub.iloc[0] if type(first_item) is list: text_feature_names.append(col_name) elif type(first_item) is bool or type(first_item) is np.bool_: if len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: category_feature_names.append(col_name) elif len(df[col_name].unique()) <= 2: binary_feature_names.append(col_name) else: df_sub = df_sub.astype(str) incorrect_emails = len( df_sub[~df_sub.str.contains(REGEX_EMAIL)].index, ) warnings.filterwarnings('ignore', 'This pattern has match groups') incorrect_phone_numbers = len( df_sub[~df_sub.str.contains(REGEX_PHONE_NUMBER)].index, ) incorrect_zip_codes = len( df_sub[~df_sub.str.contains(REGEX_ZIP_CODE)].index, ) if all(df_sub.str.contains(REGEX_INTEGER)): integer_feature_names.append(col_name) elif all(df_sub.str.contains(REGEX_NUMBER)): float_feature_names.append(col_name) elif incorrect_emails / len(df_sub.index) <= 0.99: email_features.append(col_name) elif incorrect_phone_numbers / len(df_sub.index) <= 0.99: phone_number_feature_names.append(col_name) elif incorrect_zip_codes / len(df_sub.index) <= 0.99: zip_code_feature_names.append(col_name) else: non_number_feature_names.append(col_name) elif col_type == 'bool': binary_feature_names.append(col_name) elif np.issubdtype(col_type, np.floating): float_feature_names.append(col_name) elif np.issubdtype(col_type, np.integer): df_sub = df[col_name].copy() df_sub = df_sub.dropna() if df_sub.min() >= 100 and df_sub.max() <= 99999 and 'zip' in col_name.lower(): zip_code_feature_names.append(col_name) else: integer_feature_names.append(col_name) number_feature_names = float_feature_names + integer_feature_names binary_feature_names += \ [col for col in number_feature_names if df[col].nunique(dropna=False) == 2] binary_feature_names += \ [col for col in non_number_feature_names if df[col].nunique(dropna=False) == 2] float_feature_names = [col for col in float_feature_names if col not in binary_feature_names] integer_feature_names = \ [col for col in integer_feature_names if col not in binary_feature_names] for col_name in subtract(non_number_feature_names, binary_feature_names): df_drop_na = df[col_name].dropna() if df_drop_na.empty: text_feature_names.append(col_name) 
else: matches = df_drop_na.astype(str).str.contains(REGEX_DATETIME_PATTERN) matches = matches.where(matches == True).dropna() if type(df_drop_na.iloc[0]) is list: text_feature_names.append(col_name) elif len(df_drop_na[matches.index]) / len(df_drop_na) >= DATETIME_MATCHES_THRESHOLD: datetime_feature_names.append(col_name) elif df_drop_na.nunique() / len(df_drop_na) >= 0.8: text_feature_names.append(col_name) else: word_count, _ = \ df[col_name].dropna().map(lambda x: (len(str(x).split(' ')), str(x))).max() if word_count > MAXIMUM_WORD_LENGTH_FOR_CATEGORY_FEATURES: text_feature_names.append(col_name) else: category_feature_names.append(col_name) low_cardinality_category_feature_names = \ [col for col in category_feature_names if df[col].nunique() <= kwargs.get( 'category_cardinality_threshold', 255, )] high_cardinality_category_feature_names = \ [col for col in category_feature_names if col not in low_cardinality_category_feature_names] column_types = {} array_types_mapping = { CATEGORY: low_cardinality_category_feature_names, CATEGORY_HIGH_CARDINALITY: high_cardinality_category_feature_names, DATETIME: datetime_feature_names, EMAIL: email_features, NUMBER: integer_feature_names, NUMBER_WITH_DECIMALS: float_feature_names, PHONE_NUMBER: phone_number_feature_names, TEXT: text_feature_names, TRUE_OR_FALSE: binary_feature_names, ZIP_CODE: zip_code_feature_names, } for col_type, arr in array_types_mapping.items(): for col in arr: column_types[col] = col_type return column_types mage_ai/data_cleaner/__init__.py METASEP mage_ai/__init__.py METASEP mage_ai/server/routes.py METASEP
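# Flask route handlers for the data cleaner server: process a feature set
# (clean or analyze it), list/read/update stored feature sets, and
# list/read/update cleaning pipelines. Responses are JSON-serialized with
# NumpyEncoder so numpy types survive serialization.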
from data_cleaner.data_cleaner import analyze, clean as clean_data
from data_cleaner.pipelines.base import BasePipeline
from flask import render_template, request
from numpyencoder import NumpyEncoder
from server.data.models import FeatureSet, Pipeline
from server import app

import json
import threading

@app.route("/")
def index():
    return render_template('index.html')


"""
request: {
    id: string (feature set id)
    clean: boolean
}

response: {
    id,
    metadata,
    sample_data,
    statistics,
    insights,
    suggestions
}
"""
@app.route("/process", methods=["POST"])
def process():
    request_data = request.json
    if not request_data:
        request_data = request.form

    id = request_data['id']
    if not id:
        return

    feature_set = FeatureSet(id=id)
    df = feature_set.data
    metadata = feature_set.metadata

    if request_data.get('clean', True):
        result = clean_data(df)
    else:
        result = analyze(df)

    feature_set.write_files(result)

    column_types = result['column_types']
    metadata['column_types'] = column_types

    feature_set.metadata = metadata

    response = app.response_class(
        response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
response: [
    {
        id,
        metadata,
    }
]
"""
@app.route("/feature_sets")
def feature_sets():
    feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))
    response = app.response_class(
        response=json.dumps(feature_sets, cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
response: [
    {
        id,
        metadata,
    }
]
"""
@app.route("/feature_sets/<id>")
def feature_set(id):
    feature_set = FeatureSet(id=id)
    response = app.response_class(
        response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
request: {
    metadata,
    statistics,
    insights,
    suggestions,
}

response: {
    id,
    metadata,
    pipeline,
    sample_data,
    statistics,
    insights,
    suggestions,
}
"""
@app.route("/feature_sets/<id>", methods=["PUT"])
def update_feature_set(id):
    request_data = request.json
    feature_set = FeatureSet(id=id)
    feature_set.write_files(request_data)
    response = app.response_class(
        response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
response: [
    {
        id,
        pipeline_actions,
    }
]
"""
@app.route("/pipelines")
def pipelines():
    pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))
    response = app.response_class(
        response=json.dumps(pipelines, cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
response: {
    id,
    actions,
}
"""
@app.route("/pipelines/<id>")
def pipeline(id):
    pipeline = Pipeline(id=id)
    response = app.response_class(
        response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

"""
request: {
    actions,
}

response: {
    id,
    actions,
}
"""
@app.route("/pipelines/<id>", methods=["PUT"])
def update_pipeline(id):
    request_data = request.json
    pipeline = Pipeline(id=id)
    pipeline.pipeline = BasePipeline(request_data.get('actions', []))
    response = app.response_class(
        response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),
        status=200,
        mimetype='application/json'
    )
    return response

# @app.route("/feature_sets/<id>/columns/<column_name>")
# def feature_set_column(id, column_name):
#     feature_set = FeatureSet(id=id)
#     return feature_set.column(column_name)

def clean_df(df, name, pipeline_uuid=None):
    feature_set = FeatureSet(df=df, name=name)

    metadata = feature_set.metadata

    result = clean_data(df)

    feature_set.write_files(result)

    column_types = result['column_types']
    metadata['column_types'] = column_types

    feature_set.metadata = metadata
    return (feature_set, result['df_cleaned'])

def connect_df(df, name):
    feature_set = FeatureSet(df=df, name=name)

    metadata = feature_set.metadata

    result = analyze(df)

    feature_set.write_files(result)

    column_types = result['column_types']
    metadata['column_types'] = column_types
{\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# @app.route(\"/feature_sets/<id>/columns/<column_name>\")\n# def feature_set_column(id, column_name):\n# feature_set = FeatureSet(id=id)\n# return feature_set.column(column_name)\n\ndef clean_df(df, name, pipeline_uuid=None):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = clean_data(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata", "type": "infile" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email 
protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# @app.route(\"/feature_sets/<id>/columns/<column_name>\")\n# def feature_set_column(id, column_name):\n# feature_set = FeatureSet(id=id)\n# return feature_set.column(column_name)\n\ndef clean_df(df, name, pipeline_uuid=None):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = clean_data(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n return (feature_set, result['df_cleaned'])\n\ndef connect_df(df, name):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = analyze(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n \n feature_set.metadata = metadata", "type": "infile" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n 
metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)", "type": "infile" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n 
actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)", "type": "inproject" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:", "type": "inproject" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n", "type": "common" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = 
app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# @app.route(\"/feature_sets/<id>/columns/<column_name>\")\n# def feature_set_column(id, column_name):\n# feature_set = FeatureSet(id=id)\n# return feature_set.column(column_name)\n\ndef clean_df(df, name, pipeline_uuid=None):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = clean_data(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n return (feature_set, result['df_cleaned'])\n\ndef connect_df(df, name):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = analyze(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n \n feature_set.metadata = metadata\n return (feature_set, df)\n\ndef launch() -> None:\n app_kwargs = {\"port\": 5000, \"host\": \"localhost\", \"debug\": False}", "type": "common" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom 
data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# 
@app.route(\"/feature_sets/<id>/columns/<column_name>\")\n# def feature_set_column(id, column_name):\n# feature_set = FeatureSet(id=id)\n# return feature_set.column(column_name)\n\ndef clean_df(df, name, pipeline_uuid=None):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = clean_data(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n return (feature_set, result['df_cleaned'])\n\ndef connect_df(df, name):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = analyze(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n \n feature_set.metadata = metadata\n return (feature_set, df)\n\ndef launch() -> None:\n app_kwargs = {\"port\": 5000, \"host\": \"localhost\", \"debug\": False}\n thread = threading.Thread(target=app.run, kwargs=app_kwargs, daemon=True)", "type": "common" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,", "type": "non_informative" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature 
set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n", "type": "non_informative" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n 
return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"", "type": "non_informative" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,", "type": "non_informative" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form", "type": "non_informative" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import 
render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"", "type": "random" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n 
feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# @app.route(\"/feature_sets/<id>/columns/<column_name>\")\n# def feature_set_column(id, column_name):\n# feature_set = FeatureSet(id=id)\n# return feature_set.column(column_name)\n\ndef clean_df(df, name, pipeline_uuid=None):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = clean_data(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n return (feature_set, result['df_cleaned'])\n\ndef connect_df(df, name):\n feature_set = FeatureSet(df=df, name=name)\n\n metadata = feature_set.metadata\n\n result = analyze(df)\n\n feature_set.write_files(result)\n\n column_types = result['column_types']", "type": "random" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n request_data = request.form\n\n id = request_data['id']", "type": "random" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if not request_data:\n 
request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))\n response = app.response_class(\n response=json.dumps(feature_sets, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets/<id>\")\ndef feature_set(id):\n feature_set = FeatureSet(id=id)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n metadata,\n statistics,\n insights,\n suggestions,\n}\n\nresponse: {\n id,\n metadata,\n pipeline,\n sample_data,\n statistics,\n insights,\n suggestions,\n}\n\"\"\"\[email protected](\"/feature_sets/<id>\", methods=[\"PUT\"])\ndef update_feature_set(id):\n request_data = request.json\n feature_set = FeatureSet(id=id)\n feature_set.write_files(request_data)\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n pipeline_actions,\n }\n]\n\"\"\"\[email protected](\"/pipelines\")\ndef pipelines():\n pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))\n response = app.response_class(\n response=json.dumps(pipelines, cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\")\ndef pipeline(id):\n pipeline = Pipeline(id=id)\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nrequest: {\n actions,\n}\n\nresponse: {\n id,\n actions,\n}\n\"\"\"\[email protected](\"/pipelines/<id>\", methods=[\"PUT\"])\ndef update_pipeline(id):\n request_data = request.json\n pipeline = Pipeline(id=id)\n pipeline.pipeline = BasePipeline(request_data.get('actions', []))\n response = app.response_class(\n response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),\n status=200,", "type": "random" }, { "content": "from data_cleaner.data_cleaner import analyze, clean as clean_data\nfrom data_cleaner.pipelines.base import BasePipeline\nfrom flask import render_template, request\nfrom numpyencoder import NumpyEncoder\nfrom server.data.models import FeatureSet, Pipeline\nfrom server import app\n\nimport json\nimport threading\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n\n\n\"\"\"\nrequest: {\n id: string (feature set id)\n clean: boolean\n}\n\nresponse: {\n id,\n metadata,\n sample_data,\n statistics,\n insights,\n suggestions\n}\n\"\"\"\[email protected](\"/process\", methods=[\"POST\"])\ndef process():\n request_data = request.json\n if 
not request_data:\n request_data = request.form\n\n id = request_data['id']\n if not id:\n return\n\n feature_set = FeatureSet(id=id)\n df = feature_set.data\n metadata = feature_set.metadata\n\n if request_data.get('clean', True):\n result = clean_data(df)\n else:\n result = analyze(df)\n\n feature_set.write_files(result)\n \n column_types = result['column_types']\n metadata['column_types'] = column_types\n\n feature_set.metadata = metadata\n\n response = app.response_class(\n response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\"\"\"\nresponse: [\n {\n id,\n metadata,\n }\n]\n\"\"\"\[email protected](\"/feature_sets\")\ndef feature_sets():\n feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))", "type": "random" } ]
[ " pipelines = list(map(lambda p: p.to_dict(False), Pipeline.objects()))", " response=json.dumps(pipelines, cls=NumpyEncoder),", " pipeline = Pipeline(id=id)", " response=json.dumps(pipeline.to_dict(), cls=NumpyEncoder),", " feature_set = FeatureSet(id=id)", " df = feature_set.data", " metadata = feature_set.metadata", " feature_set.write_files(result)", " feature_set.metadata = metadata", " response=json.dumps(feature_set.to_dict(), cls=NumpyEncoder),", " feature_set = FeatureSet(df=df, name=name)", " feature_sets = list(map(lambda fs: fs.to_dict(False), FeatureSet.objects()))", " response=json.dumps(feature_sets, cls=NumpyEncoder),", " return (feature_set, result['df_cleaned'])", " return (feature_set, df)", " feature_set.write_files(request_data)", " pipeline.pipeline = BasePipeline(request_data.get('actions', []))", " result = analyze(df)", " if request_data.get('clean', True):", " thread = threading.Thread(target=app.run, kwargs=app_kwargs, daemon=True)", " thread.start()", " insights,", "response: {", "response: [", " metadata,", "", "@app.route(\"/feature_sets\")", " metadata['column_types'] = column_types", " if not id:", " mimetype='application/json'", " response = app.response_class(" ]
METASEP
1
qiboteam__qibolab
qiboteam__qibolab METASEP src/qibolab/tests/regressions/__init__.py METASEP src/qibolab/tests/test_tomography.py METASEP import os import json import pathlib import pytest import numpy as np from qibolab.tomography import Tomography REGRESSION_FOLDER = pathlib.Path(__file__).with_name("regressions") def assert_regression_fixture(array, filename): """Check array matches data inside filename. Args: array: numpy array filename: fixture filename If filename does not exists, this function creates the missing file otherwise it loads from file and compare. """ filename = REGRESSION_FOLDER/filename try: target = np.load(filename) np.testing.assert_allclose(array, target) except: # pragma: no cover # case not tested in GitHub workflows because files exist np.save(filename, array) # def test_cholesky_init(): # m = np.random.random((5, 5)) # c = Cholesky.from_matrix(m) # np.testing.assert_allclose(c.matrix, m) # v = np.random.random((5,)) # c = Cholesky.from_vector(v) # np.testing.assert_allclose(c.vector, v) # with pytest.raises(ValueError): # c = Cholesky(matrix=m, vector=v) # with pytest.raises(TypeError): # c = Cholesky(matrix="test") # with pytest.raises(TypeError): # c = Cholesky(vector="test") # def test_cholesky_decompose(): # m = np.array([[1, 2, 3, 4, 5], # [2, 3, 4, 5, 6], # [3, 4, 5, 6, 7], # [4, 5, 6, 7, 8], # [5, 6, 7, 8, 9]]) # m = m + m.T # m = m + 5 * np.eye(5, dtype=m.dtype) # c = Cholesky.decompose(m) # target_matrix = np.array([[1, 0, 0, 0, 0], # [0, 2, 0, 0, 0], # [0, 0, 7, 0, 0], # [1, 2, 2, 4, 0], # [0, 0, 0, 0, 0]]) # target_vector = np.array([1, 2, 7, 4, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # np.testing.assert_allclose(c.matrix, target_matrix) # np.testing.assert_allclose(c.vector, target_vector) # def test_cholesky_reconstruct(): # v = np.arange(16) # c = Cholesky.from_vector(v) # target_matrix = np.array([ # [0.38709677+0.j, 0.32580645-0.01774194j, 0.21612903-0.02741935j, 0.01693548-0.03145161j], # [0.32580645+0.01774194j, 0.35564516+0.j, 0.23709677-0.02419355j, 0.01935484-0.03387097j], # [0.21612903+0.02741935j, 0.23709677+0.02419355j, 0.25+0.j, 0.02177419-0.03629032j], # [0.01693548+0.03145161j, 0.01935484+0.03387097j, 0.02177419+0.03629032j, 0.00725806+0.j]]) # np.testing.assert_allclose(c.reconstruct(), target_matrix, atol=1e-7) # def test_tomography_find_beta(): # amplitudes = np.random.random(16) # state = np.array([1, 2, 3, 4]) # tom = Tomography(amplitudes, state) # target_beta = [2.5, -1, -0.5, 0] # np.testing.assert_allclose(tom.find_beta(state), target_beta) @pytest.mark.skip def test_tomography_init(): n = 3 states = np.random.random((4**n, n)) gates = np.random.random((4**n, 2**n, 2**n)) tom = Tomography(states, gates) np.testing.assert_allclose(tom.states, states) np.testing.assert_allclose(tom.gates, gates) def test_tomography_default_gates(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) assert_regression_fixture(tom.gates, "default_gates.npy") def test_tomography_linear(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) assert_regression_fixture(tom.linear, "linear_estimation.npy") @pytest.mark.skip def test_tomography_fit(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) with pytest.raises(ValueError): tom.fit tom.minimize() assert tom.success assert_regression_fixture(tom.fit, "mlefit_estimation.npy") def extract_json(filepath): with open(filepath, "r") as file: raw = json.loads(file.read()) data = np.stack(list(raw.values())) return np.sqrt((data ** 
2).sum(axis=1)) @pytest.mark.skip @pytest.mark.parametrize("state_value,target_fidelity", [(0, 93.01278047175582), (1, 82.30795926024483), (2, 65.06114271984393), (3, 22.230579223385284)]) def test_tomography_example(state_value, target_fidelity): state_path = REGRESSION_FOLDER / "states_181120.json" amplitude_path = "tomo_181120-{0:02b}.json".format(state_value) amplitude_path = REGRESSION_FOLDER / amplitude_path state = extract_json(state_path) amp = extract_json(amplitude_path) tom = Tomography(amp, state) tom.minimize() assert tom.success rho_theory = np.zeros((4, 4), dtype=complex) rho_theory[state_value, state_value] = 1 fidelity = tom.fidelity(rho_theory) np.testing.assert_allclose(fidelity, target_fidelity) src/qibolab/tests/test_pulses.py METASEP import pytest import numpy as np from qibolab import pulses from qibolab.pulse_shapes import Rectangular, Gaussian, Drag, SWIPHT from qibolab.circuit import PulseSequence def test_basic_pulse(): basic = pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, "Rectangular") target_repr = "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, Rectangular)" assert repr(basic) == target_repr def test_multifrequency_pulse(): members = [ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, "Rectangular", channel=0), pulses.Pulse(0.5, 5.0, 0.7, 100, 0.5, "Gaussian", channel=1), pulses.Pulse(1.0, 3.5, 0.4, 70.0, 0.7, "Rectangular", channel=2) ] multi = pulses.MultifrequencyPulse(members) target_repr = "M(P(0, 0.5, 1.5, 0.8, 40.0, 0.7, Rectangular), "\ "P(1, 0.5, 5.0, 0.7, 100, 0.5, Gaussian), "\ "P(2, 1.0, 3.5, 0.4, 70.0, 0.7, Rectangular))" assert repr(multi) == target_repr def test_file_pulse(): filep = pulses.FilePulse(0, 1.0, "testfile") target_repr = "F(0, 1.0, testfile)" assert repr(filep) == target_repr def test_rectangular_shape(): rect = Rectangular() assert rect.name == "rectangular" assert rect.envelope(1.0, 0.2, 2.2, 4.5) == 4.5 def test_gaussian_shape(): gauss = Gaussian(1.5) assert gauss.name == "gaussian" assert gauss.sigma == 1.5 assert repr(gauss) == "(gaussian, 1.5)" target_envelop = 4.4108940298803985 time = np.array([1.0]) assert gauss.envelope(time, 0.2, 2.2, 4.5) == target_envelop def test_drag_shape(): drag = Drag(1.5, 2.5) assert drag.name == "drag" assert drag.sigma == 1.5 assert drag.beta == 2.5 assert repr(drag) == "(drag, 1.5, 2.5)" target_envelop = 4.4108940298803985 + 1.470298009960133j time = np.array([1.0]) assert drag.envelope(time, 0.2, 2.2, 4.5) == target_envelop def test_swipht_shape(): swipht = SWIPHT(2.2) assert swipht.name == "SWIPHT" assert swipht.g == 2.2 assert repr(swipht) == "(SWIPHT, 2.2)" target_envelop = 4.4108940298803985 time = np.array([1.0]) assert swipht.envelope(time, 0.2, 2.2, 4.5) == 4.5 # TODO: Fix these tests so that waveform is not zero @pytest.mark.skip("Pulse compile was changed after implementing TIIq.") def test_basic_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size)) basic = pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)) waveform = basic.compile(waveform, seq) target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) @pytest.mark.skip("Pulse compile was changed after implementing TIIq.") def test_multifrequency_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size), dtype="complex128") members = [ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ] multi = pulses.MultifrequencyPulse(members) waveform = multi.compile(waveform, seq) target_waveform = 
np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) @pytest.mark.skip("Skipping this test because `sequence.file_dir` is not available") def test_file_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size)) filep = pulses.FilePulse(0, 1.0, "file") waveform = filep.compile(waveform, seq) target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) src/qibolab/tests/test_experiments.py METASEP import pytest import qibolab import pyvisa def test_experiment_getter_setter(): assert qibolab.get_experiment() == "icarusq" with pytest.raises(KeyError): qibolab.set_experiment("test") qibolab.set_experiment("icarusq") @pytest.mark.xfail(raises=pyvisa.errors.VisaIOError) def test_icarusq_awg_setter(): assert qibolab.get_experiment() == "icarusq" qibolab.set_experiment("awg") qibolab.set_experiment("icarusq") src/qibolab/tests/test_circuit.py METASEP import pytest import numpy as np import qibo from qibo import gates, models from qibolab import pulses from qibolab.pulse_shapes import Gaussian, Drag from qibolab.circuit import PulseSequence, HardwareCircuit # TODO: Parametrize these tests using experiment @pytest.mark.skip def test_pulse_sequence_init(): seq = PulseSequence([]) assert seq.pulses == [] assert seq.duration == 1.391304347826087e-05 assert seq.sample_size == 32000 seq = PulseSequence([], duration=2e-6) assert seq.pulses == [] assert seq.duration == 2e-6 assert seq.sample_size == 4600 @pytest.mark.skip("Skipping this test because `seq.file_dir` is not available") def test_pulse_sequence_compile(): seq = PulseSequence([ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.FilePulse(0, 1.0, "file"), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ]) waveform = seq.compile() target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) def test_pulse_sequence_serialize(): seq = PulseSequence([ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.FilePulse(0, 1.0, "file"), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ]) target_repr = "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, (gaussian, 1.0)), "\ "F(0, 1.0, file), "\ "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, (drag, 1.0, 1.5))" assert seq.serialize() == target_repr def test_hardwarecircuit_errors(): qibo.set_backend("qibolab") c = models.Circuit(5) with pytest.raises(NotImplementedError): c._add_layer() with pytest.raises(NotImplementedError): c.fuse() @pytest.mark.skip def test_hardwarecircuit_sequence_duration(): from qibolab import experiment qibo.set_backend("qibolab") c = models.Circuit(2) c.add(gates.RX(0, theta=0.123)) c.add(gates.RY(0, theta=0.123)) c.add(gates.H(0)) c.add(gates.Align(0)) c.add(gates.M(0)) c.qubit_config = experiment.static.initial_calibration qubit_times = c._calculate_sequence_duration(c.queue) # pylint: disable=E1101 target_qubit_times = [3.911038e-08, 0] np.testing.assert_allclose(qubit_times, target_qubit_times) @pytest.mark.skip def test_hardwarecircuit_create_pulse_sequence(): from qibolab import experiment qibo.set_backend("qibolab") c = models.Circuit(2) c.add(gates.RX(0, theta=0.123)) c.add(gates.RY(0, theta=0.123)) c.add(gates.H(0)) c.add(gates.Align(0)) c.add(gates.M(0)) c.qubit_config = experiment.static.initial_calibration c.qubit_config[0]["gates"]["measure"] = [] qubit_times = np.zeros(c.nqubits) - c._calculate_sequence_duration(c.queue) # pylint: disable=E1101 qubit_phases = np.zeros(c.nqubits) pulse_sequence = c.create_pulse_sequence(c.queue, qubit_times, 
qubit_phases) # pylint: disable=E1101 target_pulse_sequence = "P(3, -1.940378868990046e-09, 9.70189434495023e-10, 0.375, 747382500.0, 0.0, (rectangular)), "\ "P(3, -9.70189434495023e-10, 9.70189434495023e-10, 0.375, 747382500.0, 90.0, (rectangular))" pulse_sequence.serialize() == target_pulse_sequence src/qibolab/tests/__init__.py METASEP src/qibolab/platforms/qbloxplatform.py METASEP from qibo.config import raise_error, log from qibolab.platforms.abstract import AbstractPlatform class QBloxPlatform(AbstractPlatform): """Platform for controlling quantum devices using QCM and QRM. Example: .. code-block:: python from qibolab import Platform platform = Platform("tiiq") """ def __init__(self, name, runcard): self._qrm = None self._qcm = None self._LO_qrm = None self._LO_qcm = None super().__init__(name, runcard) self.last_qcm_pulses = None self.last_qrm_pulses = None @property def qrm(self): """Reference to :class:`qibolab.instruments.qblox.PulsarQRM` instrument.""" self._check_connected() return self._qrm @property def qcm(self): """Reference to :class:`qibolab.instruments.qblox.PulsarQCM` instrument.""" self._check_connected() return self._qcm @property def LO_qrm(self): """Reference to QRM local oscillator (:class:`qibolab.instruments.rohde_schwarz.SGS100A`).""" self._check_connected() return self._LO_qrm @property def LO_qcm(self): """Reference to QCM local oscillator (:class:`qibolab.instruments.rohde_schwarz.SGS100A`).""" self._check_connected() return self._LO_qcm def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" if not self.is_connected: log.info(f"Connecting to {self.name} instruments.") try: from qibolab.instruments import PulsarQRM, PulsarQCM, SGS100A self._qrm = PulsarQRM( **self._settings.get("QRM_init_settings")) self._qcm = PulsarQCM( **self._settings.get("QCM_init_settings")) self._LO_qrm = SGS100A( **self._settings.get("LO_QRM_init_settings")) self._LO_qcm = SGS100A( **self._settings.get("LO_QCM_init_settings")) self.is_connected = True except Exception as exception: raise_error(RuntimeError, "Cannot establish connection to " f"{self.name} instruments. " f"Error captured: '{exception}'") def setup(self): """Configures instruments using the loaded calibration settings.""" if self.is_connected: self._qrm.setup(**self._settings.get("QRM_settings")) self._qcm.setup(**self._settings.get("QCM_settings")) self._LO_qrm.setup(**self._settings.get("LO_QRM_settings")) self._LO_qcm.setup(**self._settings.get("LO_QCM_settings")) def start(self): """Turns on the local oscillators. The QBlox insturments are turned on automatically during execution after the required pulse sequences are loaded. """ self._LO_qcm.on() self._LO_qrm.on() def stop(self): """Turns off all the lab instruments.""" self.LO_qrm.off() self.LO_qcm.off() self.qrm.stop() self.qcm.stop() def disconnect(self): """Disconnects from the lab instruments.""" if self.is_connected: self._LO_qrm.close() self._LO_qcm.close() self._qrm.close() self._qcm.close() self.is_connected = False def execute(self, sequence, nshots=None): """Executes a pulse sequence. Pulses are being cached so that are not reuploaded if they are the same as the ones sent previously. This greatly accelerates some characterization routines that recurrently use the same set of pulses, i.e. qubit and resonator spectroscopy, spin echo, and future circuits based on fixed gates. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. 
nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. Returns: Readout results acquired by :class:`qibolab.instruments.qblox.PulsarQRM` after execution. """ if not self.is_connected: raise_error( RuntimeError, "Execution failed because instruments are not connected.") if nshots is None: nshots = self.hardware_avg # Translate and upload instructions to instruments if sequence.qcm_pulses: if self.last_qcm_pulses != [pulse.serial() for pulse in sequence.qcm_pulses]: waveforms, program = self._qcm.translate(sequence, self.delay_before_readout, nshots) self._qcm.upload(waveforms, program, self.data_folder) if sequence.qrm_pulses: if self.last_qrm_pulses != [pulse.serial() for pulse in sequence.qrm_pulses]: waveforms, program = self._qrm.translate(sequence, self.delay_before_readout, nshots) self._qrm.upload(waveforms, program, self.data_folder) # Execute instructions if sequence.qcm_pulses: self._qcm.play_sequence() if sequence.qrm_pulses: # TODO: Find a better way to pass the readout pulse here acquisition_results = self._qrm.play_sequence_and_acquire( sequence.qrm_pulses[0]) else: acquisition_results = None self.last_qcm_pulses = [pulse.serial() for pulse in sequence.qcm_pulses] self.last_qrm_pulses = [pulse.serial() for pulse in sequence.qrm_pulses] return acquisition_results src/qibolab/platforms/icplatform.py METASEP import copy from qibo.config import raise_error, log from qibolab.platforms.abstract import AbstractPlatform class Qubit: """Describes a single qubit in pulse control and readout extraction. Args: id (int): Qubit ID. pi_pulse (dict): Qubit pi-pulse parameters. See qibolab.pulses.Pulse for more information. readout_pulse (dict): Qubit readout pulse parameters. See qibolab.pulses.ReadoutPulse for more information. resonator_spectroscopy_max_ro_voltage (float): Readout voltage corresponding to the ground state of the qubit. rabi_oscillations_pi_pulse_min_voltage (float): Readout voltage corresponding to the excited state of the qubit. playback (str): Instrument name for playing the qubit XY control pulses. playback_readout (str): Instrument name for playing the qubit readout pulse. readout_frequency (float): Readout frequency for IQ demodulation. readout (str): Instrument name for reading the qubit. readout_channels (int, int[]): Channels on the instrument associated to qubit readout. """ def __init__(self, pi_pulse, readout_pulse, readout_frequency, resonator_spectroscopy_max_ro_voltage, rabi_oscillations_pi_pulse_min_voltage, playback, playback_readout, readout, readout_channels): self.id = id self.pi_pulse = pi_pulse self.readout_pulse = readout_pulse self.readout_frequency = readout_frequency self.max_readout_voltage = resonator_spectroscopy_max_ro_voltage self.min_readout_voltage = rabi_oscillations_pi_pulse_min_voltage self.playback = playback self.playback_readout = playback_readout self.readout = readout self.readout_channels = readout_channels class ICPlatform(AbstractPlatform): """Platform for controlling quantum devices with IC. Example: .. 
code-block:: python from qibolab import Platform platform = Platform("icarusq") """ def __init__(self, name, runcard): self._instruments = [] self._lo = [] self._adc = [] super().__init__(name, runcard) self.qubits = [] qubits = self._settings.get("qubits") for qubit_dict in qubits.values(): self.qubits.append(Qubit(**qubit_dict)) def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" if not self.is_connected: log.info(f"Connecting to {self.name} instruments.") try: import qibolab.instruments as qi instruments = self._settings.get("instruments") for params in instruments.values(): inst = getattr(qi, params.get("type"))(**params.get("init_settings")) self._instruments.append(inst) # Use yaml config to track instrument type if params.get("lo"): self._lo.append(inst) if params.get("adc"): self._adc.append(inst) self.is_connected = True except Exception as exception: raise_error(RuntimeError, "Cannot establish connection to " f"{self.name} instruments. " f"Error captured: '{exception}'") def setup(self): """Configures instruments using the loaded calibration settings.""" if self.is_connected: instruments = self._settings.get("instruments") for inst in self._instruments: inst.setup(**instruments.get(inst.name).get("settings")) def start(self): """Turns on the local oscillators. At this point, the pulse sequence have not been uploaded to the DACs, so they will not be started yet. """ for lo in self._lo: lo.start() def stop(self): """Turns off all the lab instruments.""" for inst in self._instruments: inst.stop() def disconnect(self): """Disconnects from the lab instruments.""" if self.is_connected: for inst in self._instruments: inst.close() self._instruments = [] self._lo = [] self._adc = [] self.is_connected = False def execute(self, sequence, nshots=None): """Executes a pulse sequence. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. Returns: Readout results acquired by :class:`qibolab.instruments.qblox.PulsarQRM` after execution. 
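
        Example (a minimal sketch; assumes ``sequence`` is a valid
        :class:`qibolab.pulses.PulseSequence` for the configured qubits and
        the shot count is arbitrary):

        .. code-block:: python

            platform = Platform("icarusq")
            platform.connect()
            platform.setup()
            platform.start()
            results = platform.execute(sequence, nshots=1024)
            platform.stop()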
""" if not self.is_connected: raise_error( RuntimeError, "Execution failed because instruments are not connected.") if nshots is None: nshots = self.hardware_avg from qibolab.pulses import ReadoutPulse qubits_to_measure = [] measurement_results = [] pulse_mapping = {} for pulse in sequence.pulses: # Assign pulses to each respective waveform generator qubit = self.fetch_qubit(pulse.qubit) playback_device = qubit.playback # Track each qubit to measure if isinstance(pulse, ReadoutPulse): qubits_to_measure.append(pulse.qubit) playback_device = qubit.playback_readout if playback_device not in pulse_mapping.keys(): pulse_mapping[playback_device] = [] pulse_mapping[playback_device].append(pulse) # Translate and upload the pulse for each device for device, subsequence in pulse_mapping.items(): inst = self.fetch_instrument(device) inst.upload(inst.translate(subsequence, nshots)) inst.play_sequence() for adc in self._adc: adc.arm(nshots) # Start the experiment sequence self.start_experiment() # Fetch the experiment results for qubit_id in qubits_to_measure: qubit = self.fetch_qubit(qubit_id) inst = self.fetch_instrument(qubit.readout) measurement_results.append(inst.result(qubit.readout_frequency)) if len(qubits_to_measure) == 1: return measurement_results[0] return measurement_results def fetch_instrument(self, name): """Returns a reference to an instrument. """ try: res = next(inst for inst in self._instruments if inst.name == name) return res except StopIteration: raise_error(Exception, "Instrument not found") def fetch_qubit(self, qubit_id=0) -> Qubit: """Fetches the qubit based on the id. """ return self.qubits[qubit_id] def start_experiment(self): """Starts the instrument to start the experiment sequence. """ inst = self.fetch_instrument(self._settings.get("settings").get("experiment_start_instrument")) inst.start_experiment() def fetch_qubit_pi_pulse(self, qubit_id=0) -> dict: """Fetches the qubit pi-pulse. """ # Use copy to avoid mutability return copy.copy(self.fetch_qubit(qubit_id).pi_pulse) def fetch_qubit_readout_pulse(self, qubit_id=0) -> dict: """Fetches the qubit readout pulse. """ # Use copy to avoid mutability return copy.copy(self.fetch_qubit(qubit_id).readout_pulse) src/qibolab/platforms/abstract.py METASEP from abc import ABC, abstractmethod import yaml from qibo.config import raise_error, log class AbstractPlatform(ABC): """Abstract platform for controlling quantum devices. Args: name (str): name of the platform. runcard (str): path to the yaml file containing the platform setup. 
""" def __init__(self, name, runcard): log.info(f"Loading platform {name}") log.info(f"Loading runcard {runcard}") self.name = name # Load calibration settings with open(runcard, "r") as file: self._settings = yaml.safe_load(file) # Define references to instruments self.is_connected = False def _check_connected(self): if not self.is_connected: raise_error(RuntimeError, "Cannot access instrument because it is not connected.") @property def data_folder(self): return self._settings.get("settings").get("data_folder") @property def hardware_avg(self): return self._settings.get("settings").get("hardware_avg") @property def sampling_rate(self): return self._settings.get("settings").get("sampling_rate") @property def software_averages(self): return self._settings.get("settings").get("software_averages") @software_averages.setter def software_averages(self, x): self._settings["settings"]["software_averages"] = x @property def repetition_duration(self): return self._settings.get("settings").get("repetition_duration") @property def resonator_frequency(self): return self._settings.get("settings").get("resonator_freq") @property def qubit_frequency(self): return self._settings.get("settings").get("qubit_freq") @property def pi_pulse_gain(self): return self._settings.get("settings").get("pi_pulse_gain") @property def pi_pulse_amplitude(self): return self._settings.get("settings").get("pi_pulse_amplitude") @property def pi_pulse_duration(self): return self._settings.get("settings").get("pi_pulse_duration") @property def pi_pulse_frequency(self): return self._settings.get("settings").get("pi_pulse_frequency") @property def readout_pulse(self): return self._settings.get("settings").get("readout_pulse") @property def max_readout_voltage(self): return self._settings.get("settings").get("resonator_spectroscopy_max_ro_voltage") @property def min_readout_voltage(self): return self._settings.get("settings").get("rabi_oscillations_pi_pulse_min_voltage") @property def delay_between_pulses(self): return self._settings.get("settings").get("delay_between_pulses") @property def delay_before_readout(self): return self._settings.get("settings").get("delay_before_readout") def run_calibration(self, runcard): """Executes calibration routines and updates the settings json.""" # TODO: Implement calibration routines and update ``self._settings``. # update instruments with new calibration settings self.setup() # save new calibration settings to json with open(runcard, "w") as file: yaml.dump(self._settings, file) def __call__(self, sequence, nshots=None): return self.execute(sequence, nshots) @abstractmethod def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" raise_error(NotImplementedError) @abstractmethod def setup(self): """Configures instruments using the loaded calibration settings.""" raise_error(NotImplementedError) @abstractmethod def start(self): """Turns on the local oscillators.""" raise_error(NotImplementedError) @abstractmethod def stop(self): """Turns off all the lab instruments.""" raise_error(NotImplementedError) @abstractmethod def disconnect(self): """Disconnects from the lab instruments.""" raise_error(NotImplementedError) @abstractmethod def execute(self, sequence, nshots=None): """Executes a pulse sequence. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. 
Returns: Readout results acquired by after execution. """ raise_error(NotImplementedError) src/qibolab/platforms/__init__.py METASEP src/qibolab/instruments/rohde_schwarz.py METASEP """ Class to interface with the local oscillator RohdeSchwarz SGS100A """ import logging from qibolab.instruments.instrument import Instrument, InstrumentException logger = logging.getLogger(__name__) # TODO: Consider using a global logger class SGS100A(Instrument): def __init__(self, label, ip): """ create Local Oscillator with name = label and connect to it in local IP = ip Params format example: "ip": '192.168.0.8', "label": "qcm_LO" """ super().__init__(ip) self.device = None self._power = None self._frequency = None self._connected = False self._signature = f"{type(self).__name__}@{ip}" self.label = label self.connect() def connect(self): import qcodes.instrument_drivers.rohde_schwarz.SGS100A as LO_SGS100A try: self.device = LO_SGS100A.RohdeSchwarz_SGS100A(self.label, f"TCPIP0::{self.ip}::inst0::INSTR") except Exception as exc: raise InstrumentException(self, str(exc)) self._connected = True logger.info("Local oscillator connected") def setup(self, power, frequency): self.set_power(power) self.set_frequency(frequency) def set_power(self, power): """Set dbm power to local oscillator.""" self._power = power self.device.power(power) logger.info(f"Local oscillator power set to {power}.") def set_frequency(self, frequency): self._frequency = frequency self.device.frequency(frequency) logger.info(f"Local oscillator frequency set to {frequency}.") def get_power(self): if self._power is not None: return self._power raise RuntimeError("Local oscillator power was not set.") def get_frequency(self): if self._frequency is not None: return self._frequency raise RuntimeError("Local oscillator frequency was not set.") def on(self): """Start generating microwaves.""" self.device.on() logger.info("Local oscillator on.") def off(self): """Stop generating microwaves.""" self.device.off() logger.info("Local oscillator off.") def close(self): if self._connected: self.off() self.device.close() self._connected = False # TODO: Figure out how to fix this #def __del__(self): # self.close() src/qibolab/instruments/qblox.py METASEP from abc import abstractmethod import json import numpy as np from abc import ABC, abstractmethod from qibo.config import raise_error from qibolab.instruments.instrument import Instrument, InstrumentException import logging logger = logging.getLogger(__name__) # TODO: Consider using a global logger class GenericPulsar(Instrument, ABC): def __init__(self, label, ip, sequencer, ref_clock, sync_en, is_cluster): super().__init__(ip) self.label = label # TODO When updating to the new firmware, use a sequencer mapping instead of setting a single sequencer self.sequencer = sequencer self.ref_clock = ref_clock self.sync_en = sync_en self.is_cluster = is_cluster self._connected = False self.Device = None self.device = None # To be defined in each instrument self.name = None # To be defined during setup self.hardware_avg = None self.initial_delay = None self.repetition_duration = None # hardcoded values used in ``generate_program`` self.delay_before_readout = 4 # same value is used for all readout pulses (?) self.wait_loop_step = 1000 self.duration_base = 16380 # maximum length of a waveform in number of samples (defined by the device memory). 
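        # Descriptive note: ``generate_program`` turns the difference
        # ``repetition_duration - duration_base`` into idle time between shots,
        # emitted as ``num_wait_loops`` waits of ``wait_loop_step`` plus one
        # remainder wait (presumably because a single sequencer ``wait``
        # instruction only accepts a bounded immediate value).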
# hardcoded values used in ``upload`` # TODO QCM shouldn't have acquisitions self.acquisitions = {"single": {"num_bins": 1, "index":0}} self.weights = {} def connect(self): """Connects to the instruments.""" if not self._connected: try: self.device = self.Device(self.label, self.ip) except Exception as exc: raise InstrumentException(self, str(exc)) self._connected = True else: raise RuntimeError @property def gain(self): return self._gain @gain.setter def gain(self, gain): self._gain = gain if self.sequencer == 1: self.device.sequencer1_gain_awg_path0(gain) self.device.sequencer1_gain_awg_path1(gain) else: self.device.sequencer0_gain_awg_path0(gain) self.device.sequencer0_gain_awg_path1(gain) def setup(self, gain, initial_delay, repetition_duration): """Sets calibration setting to QBlox instruments. Args: gain (float): initial_delay (float): repetition_duration (float): """ self.gain = gain self.initial_delay = initial_delay self.repetition_duration = repetition_duration def _translate_single_pulse(self, pulse): """Translates a single pulse to the instrument waveform format. Helper method for :meth:`qibolab.instruments.qblox.GenericPulsar.generate_waveforms`. Args: pulse (:class:`qibolab.pulses.Pulse`): Pulse object to translate. Returns: Dictionary containing the waveform corresponding to the pulse. """ # Use the envelope to modulate a sinusoldal signal of frequency freq_if envelope_i = pulse.compile() # TODO: if ``envelope_q`` is not always 0 we need to find how to # calculate it envelope_q = np.zeros(int(pulse.duration)) time = np.arange(pulse.duration) * 1e-9 # FIXME: There should be a simpler way to construct this array cosalpha = np.cos(2 * np.pi * pulse.frequency * time + pulse.phase) sinalpha = np.sin(2 * np.pi * pulse.frequency * time + pulse.phase) mod_matrix = np.array([[cosalpha,sinalpha], [-sinalpha,cosalpha]]) result = [] for it, t, ii, qq in zip(np.arange(pulse.duration), time, envelope_i, envelope_q): result.append(mod_matrix[:, :, it] @ np.array([ii, qq])) mod_signals = np.array(result) # add offsets to compensate mixer leakage waveform = { "modI": {"data": mod_signals[:, 0] + pulse.offset_i, "index": 0}, "modQ": {"data": mod_signals[:, 1] + pulse.offset_q, "index": 1} } return waveform def generate_waveforms(self, pulses): """Translates a list of pulses to the instrument waveform format. Args: pulses (list): List of :class:`qibolab.pulses.Pulse` objects. Returns: Dictionary containing waveforms corresponding to all pulses. """ if not pulses: raise_error(NotImplementedError, "Cannot translate empty pulse sequence.") name = self.name combined_length = max(pulse.start + pulse.duration for pulse in pulses) waveforms = { f"modI_{name}": {"data": np.zeros(combined_length), "index": 0}, f"modQ_{name}": {"data": np.zeros(combined_length), "index": 1} } for pulse in pulses: waveform = self._translate_single_pulse(pulse) i0, i1 = pulse.start, pulse.start + pulse.duration waveforms[f"modI_{name}"]["data"][i0:i1] += waveform["modI"]["data"] waveforms[f"modQ_{name}"]["data"][i0:i1] += waveform["modQ"]["data"] #Fixing 0s addded to the qrm waveform. 
Needs to be improved, but working well on TIIq for pulse in pulses: if(pulse.channel == "qrm"): waveforms[f"modI_{name}"]["data"] = waveforms[f"modI_{name}"]["data"][pulse.start:] waveforms[f"modQ_{name}"]["data"] = waveforms[f"modQ_{name}"]["data"][pulse.start:] return waveforms def generate_program(self, hardware_avg, initial_delay, delay_before_readout, acquire_instruction, wait_time): """Generates the program to be uploaded to instruments.""" extra_duration = self.repetition_duration - self.duration_base extra_wait = extra_duration % self.wait_loop_step num_wait_loops = (extra_duration - extra_wait) // self. wait_loop_step # This calculation was moved to `PulsarQCM` and `PulsarQRM` #if ro_pulse is not None: # acquire_instruction = "acquire 0,0,4" # wait_time = self.duration_base - initial_delay - delay_before_readout - 4 #else: # acquire_instruction = "" # wait_time = self.duration_base - initial_delay - delay_before_readout if initial_delay != 0: initial_wait_instruction = f"wait {initial_delay}" else: initial_wait_instruction = "" program = f""" move {hardware_avg},R0 nop wait_sync 4 # Synchronize sequencers over multiple instruments loop: {initial_wait_instruction} play 0,1,{delay_before_readout} {acquire_instruction} wait {wait_time} move {num_wait_loops},R1 nop repeatloop: wait {self.wait_loop_step} loop R1,@repeatloop wait {extra_wait} loop R0,@loop stop """ return program @abstractmethod def translate(self, sequence, nshots): """Translates an abstract pulse sequence to QBlox format. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence. Returns: The waveforms (dict) and program (str) required to execute the pulse sequence on QBlox instruments. """ raise_error(NotImplementedError) def upload(self, waveforms, program, data_folder): """Uploads waveforms and programs to QBlox sequencer to prepare execution.""" import os # Upload waveforms and program # Reformat waveforms to lists for name, waveform in waveforms.items(): if isinstance(waveform["data"], np.ndarray): waveforms[name]["data"] = waveforms[name]["data"].tolist() # JSON only supports lists # Add sequence program and waveforms to single dictionary and write to JSON file filename = f"{data_folder}/{self.name}_sequence.json" program_dict = { "waveforms": waveforms, "weights": self.weights, "acquisitions": self.acquisitions, "program": program } if not os.path.exists(data_folder): os.makedirs(data_folder) with open(filename, "w", encoding="utf-8") as file: json.dump(program_dict, file, indent=4) # Upload json file to the device if self.sequencer == 1: self.device.sequencer1_waveforms_and_program(os.path.join(os.getcwd(), filename)) else: self.device.sequencer0_waveforms_and_program(os.path.join(os.getcwd(), filename)) def play_sequence(self): """Executes the uploaded instructions.""" # arm sequencer and start playing sequence self.device.arm_sequencer() self.device.start_sequencer() def stop(self): """Stops the QBlox sequencer from sending pulses.""" self.device.stop_sequencer() def close(self): """Disconnects from the instrument.""" if self._connected: self.stop() self.device.close() self._connected = False # TODO: Figure out how to fix this #def __del__(self): # self.close() class PulsarQRM(GenericPulsar): """Class for interfacing with Pulsar QRM.""" def __init__(self, label, ip, ref_clock="external", sequencer=0, sync_en=True, hardware_avg_en=True, acq_trigger_mode="sequencer", is_cluster=True): super().__init__(label, ip, sequencer, ref_clock, sync_en, is_cluster) # Instantiate base object from qblox 
library and connect to it self.name = "qrm" if self.is_cluster: from cluster.cluster import cluster_qrm self.Device = cluster_qrm else: from pulsar_qrm.pulsar_qrm import pulsar_qrm self.Device = pulsar_qrm self.connect() self.sequencer = sequencer self.hardware_avg_en = hardware_avg_en # Reset and configure self.device.reset() self.device.reference_source(ref_clock) self.device.scope_acq_sequencer_select(sequencer) self.device.scope_acq_avg_mode_en_path0(hardware_avg_en) self.device.scope_acq_avg_mode_en_path1(hardware_avg_en) self.device.scope_acq_trigger_mode_path0(acq_trigger_mode) self.device.scope_acq_trigger_mode_path1(acq_trigger_mode) # sync sequencer if self.sequencer == 1: self.device.sequencer1_sync_en(sync_en) else: self.device.sequencer0_sync_en(sync_en) def setup(self, gain, initial_delay, repetition_duration, start_sample, integration_length, sampling_rate, mode): super().setup(gain, initial_delay, repetition_duration) self.start_sample = start_sample self.integration_length = integration_length self.sampling_rate = sampling_rate self.mode = mode def translate(self, sequence, delay_before_readout, nshots): # Allocate only readout pulses to PulsarQRM waveforms = self.generate_waveforms(sequence.qrm_pulses) # Generate program without acquire instruction initial_delay = sequence.qrm_pulses[0].start # Acquire waveforms over remaining duration of acquisition of input vector of length = 16380 with integration weights 0,0 acquire_instruction = "acquire 0,0,4" wait_time = self.duration_base - initial_delay - delay_before_readout - 4 # FIXME: Not sure why this hardcoded 4 is needed program = self.generate_program(nshots, initial_delay, delay_before_readout, acquire_instruction, wait_time) return waveforms, program def play_sequence_and_acquire(self, ro_pulse): """Executes the uploaded instructions and retrieves the readout results. Args: ro_pulse (:class:`qibolab.pulses.Pulse`): Readout pulse to use for retrieving the results. """ #arm sequencer and start playing sequence super().play_sequence() #start acquisition of data #Wait for the sequencer to stop with a timeout period of one minute. self.device.get_sequencer_state(0, 1) #Wait for the acquisition to finish with a timeout period of one second. self.device.get_acquisition_state(self.sequencer, 1) #Move acquisition data from temporary memory to acquisition list. self.device.store_scope_acquisition(self.sequencer, "single") #Get acquisition list from instrument. single_acq = self.device.get_acquisitions(self.sequencer) i, q = self._demodulate_and_integrate(single_acq, ro_pulse) acquisition_results = np.sqrt(i**2 + q**2), np.arctan2(q, i), i, q return acquisition_results def _demodulate_and_integrate(self, single_acq, ro_pulse): #DOWN Conversion norm_factor = 1. 
/ (self.integration_length) n0 = self.start_sample n1 = self.start_sample + self.integration_length input_vec_I = np.array(single_acq["single"]["acquisition"]["scope"]["path0"]["data"][n0: n1]) input_vec_Q = np.array(single_acq["single"]["acquisition"]["scope"]["path1"]["data"][n0: n1]) input_vec_I -= np.mean(input_vec_I) input_vec_Q -= np.mean(input_vec_Q) if self.mode == 'ssb': modulated_i = input_vec_I modulated_q = input_vec_Q time = np.arange(modulated_i.shape[0])*1e-9 cosalpha = np.cos(2 * np.pi * ro_pulse.frequency * time) sinalpha = np.sin(2 * np.pi * ro_pulse.frequency * time) demod_matrix = 2 * np.array([[cosalpha, -sinalpha], [sinalpha, cosalpha]]) result = [] for it, t, ii, qq in zip(np.arange(modulated_i.shape[0]), time,modulated_i, modulated_q): result.append(demod_matrix[:,:,it] @ np.array([ii, qq])) demodulated_signal = np.array(result) integrated_signal = norm_factor*np.sum(demodulated_signal,axis=0) elif self.mode == 'optimal': raise_error(NotImplementedError, "Optimal Demodulation Mode not coded yet.") else: raise_error(NotImplementedError, "Demodulation mode not understood.") return integrated_signal class PulsarQCM(GenericPulsar): def __init__(self, label, ip, sequencer=0, ref_clock="external", sync_en=True, is_cluster=True): super().__init__(label, ip, sequencer, ref_clock, sync_en, is_cluster) # Instantiate base object from qblox library and connect to it self.name = "qcm" if self.is_cluster: from cluster.cluster import cluster_qcm self.Device = cluster_qcm else: from pulsar_qcm.pulsar_qcm import pulsar_qcm self.Device = pulsar_qcm self.connect() self.sequencer = sequencer # Reset and configure self.device.reset() self.device.reference_source(ref_clock) if self.sequencer == 1: self.device.sequencer1_sync_en(sync_en) else: self.device.sequencer0_sync_en(sync_en) def translate(self, sequence, delay_before_read_out, nshots=None): # Allocate only qubit pulses to PulsarQRM waveforms = self.generate_waveforms(sequence.qcm_pulses) # Generate program without acquire instruction initial_delay = sequence.qcm_pulses[0].start acquire_instruction = "" wait_time = self.duration_base - initial_delay - delay_before_read_out program = self.generate_program(nshots, initial_delay, delay_before_read_out, acquire_instruction, wait_time) return waveforms, program src/qibolab/instruments/instrument.py METASEP from abc import ABC, abstractmethod class Instrument(ABC): """ Parent class for all the instruments connected via TCPIP. """ def __init__(self, ip): self._connected = False self.ip = ip self._signature = f"{type(self).__name__}@{ip}" self.device = None @abstractmethod def connect(self): """ Establish connection with the instrument. Initialize self.device variable """ raise NotImplementedError @property def signature(self): return self._signature @abstractmethod def close(self): """ Close connection with the instrument. Set instrument values to idle values if required. 
""" raise NotImplementedError class InstrumentException(Exception): def __init__(self, instrument: Instrument, message: str): self.instrument = instrument header = f"InstrumentException with {self.instrument.signature}" full_msg = header + ": " + message super().__init__(full_msg) self.instrument = instrument src/qibolab/instruments/icarusq.py METASEP import pyvisa as visa import numpy as np from typing import List, Optional, Union from qcodes.instrument_drivers.AlazarTech import ATS # Frequency signal generation mode MODE_NYQUIST = 0 MODE_MIXER = 1 # Waveform functions def square(t, start, duration, frequency, amplitude, phase): x = amplitude * (1 * (start < t) & 1 * (start+duration > t)) i = x * np.cos(2 * np.pi * frequency * t + phase[0]) q = - x * np.sin(2 * np.pi * frequency * t + phase[1]) return i, q def TTL(t, start, duration, amplitude): x = amplitude * (1 * (start < t) & 1 * (start + duration > t)) return x def sine(t, start, duration, frequency, amplitude, phase): x = amplitude * (1 * (start < t) & 1 * (start+duration > t)) wfm = x * np.sin(2 * np.pi * frequency * t + phase) return wfm class Instrument: """Abstract class for instrument methods. """ def connect(self): pass def start(self): pass def stop(self): pass def close(self): pass class VisaInstrument: """Instrument class that uses the VISA I/O standard. Implementation based on qcodes drivers. """ def __init__(self) -> None: self._visa_handle = None def connect(self, address: str, timeout: int = 10000) -> None: """Connects to the instrument. """ rm = visa.ResourceManager() self._visa_handle = rm.open_resource(address, timeout=timeout) def write(self, msg: Union[bytes, str]) -> None: """Writes a message to the instrument. """ self._visa_handle.write(msg) def query(self, msg: Union[bytes, str]) -> str: """Writes a message to the instrument and read the response. """ return self._visa_handle.query(msg) def read(self) -> str: """Waits for and reads the response from the instrument. """ return self._visa_handle.read() def close(self) -> None: """Closes the instrument connection. """ self._visa_handle.close() def ready(self) -> None: """ Blocking command """ self.query("*OPC?") class TektronixAWG5204(VisaInstrument): """Driver for the Tektronix AWG5204 instrument. """ def __init__(self, name, address): VisaInstrument.__init__(self) self.connect(address) self.name = name self._nchannels = 7 self._sampling_rate = None self._mode = None self._amplitude = [0.75, 0.75, 0.75, 0.75] self._sequence_delay = None self._pulse_buffer = None self._adc_delay = None self._qb_delay = None self._ro_delay = None self._ip = None self._channel_phase = None def setup(self, offset: List[Union[int, float]], amplitude: Optional[List[Union[int, float]]] = [0.75, 0.75, 0.75, 0.75], resolution: Optional[int] = 14, sampling_rate: Optional[Union[int, float]] = 2.5e9, mode: int = MODE_MIXER, sequence_delay: float = 60e-6, pulse_buffer: float = 1e-6, adc_delay: float = 282e-9, qb_delay: float = 292e-9, ro_delay: float = 266e-9, ip: str = "192.168.0.2", channel_phase: List[float] = [-0.10821, 0.00349066, 0.1850049, -0.0383972], **kwargs) -> None: """ Setup the instrument and assigns constants to be used for later. Arguments: offset (float[4]): List of aplitude offset per channel in volts. amplitude (float[4]): List of maximum peak-to-peak amplitude per channel in volts. resolution (float): Bit resolution of the AWG DACs. Normally this is assigned per channel but the driver requires all channels to have the same resolution. 
sampling_rate (float): Sampling rate of the AWG in S/s. mode (int): Nyquist or mixer frequency generation selection. sequence_delay (float): Time between each pulse sequence in seconds. pulse_buffer (float): Pad time before the start of the pulse sequence and after the end of the pulse sequence in seconds. adc_delay (float): Delay for the start of the ADC trigger signal in seconds. qb_delay (float): Delay for the start of the qubit switch TTL signal in seconds. ro_delay (float): Delay for the start of the readout switch TTL signal in seconds. ip (str): IP address for the device for waveform transfer. channel_phase (float[4]): Phase in radians for each channel. Used primarily on mixer mode to promote target sideband. """ # Reset the instrument and assign amplitude, offset and resolution per channel self.reset() for idx in range(4): ch = idx + 1 self.write("SOURCe{}:VOLTage {}".format(ch, amplitude[idx])) self._amplitude[idx] = amplitude[idx] self.write("SOURCE{}:VOLTAGE:LEVEL:IMMEDIATE:OFFSET {}".format(ch, offset[ch - 1])) self.write("SOURce{}:DAC:RESolution {}".format(ch, resolution)) # Set the DAC modes and sampling rate self.write("SOUR1:DMOD NRZ") self.write("SOUR2:DMOD NRZ") self.write("CLOCk:SRATe {}".format(sampling_rate)) if mode == MODE_NYQUIST: self.write("SOUR3:DMOD MIX") self.write("SOUR4:DMOD MIX") else: self.write("SOUR3:DMOD NRZ") self.write("SOUR4:DMOD NRZ") # Assigns constants to be used later self._mode = mode self._sampling_rate = sampling_rate self._pulse_buffer = pulse_buffer self._sequence_delay = sequence_delay self._qb_delay = qb_delay self._ro_delay = ro_delay self._adc_delay = adc_delay self._ip = ip self._channel_phase = channel_phase self.ready() def reset(self) -> None: """Reset the instrument back to AWG mode. """ self.write("INSTrument:MODE AWG") self.write("CLOC:SOUR EFIX") # Set AWG to external reference, 10 MHz self.write("CLOC:OUTP:STAT OFF") # Disable clock output self.clear() def clear(self) -> None: """Clear loaded waveform and sequences. """ self.write('SLISt:SEQuence:DELete ALL') self.write('WLISt:WAVeform:DELete ALL') self.ready() def translate(self, sequence, shots): """ Translates the pulse sequence into Tektronix .seqx file Arguments: sequence (qibolab.pulses.Pulse[]): Array containing pulses to be fired on this instrument. shots (int): Number of repetitions. 
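
        Example (a sketch; mirrors how
        :class:`qibolab.platforms.icplatform.ICPlatform` drives this
        instrument, with an arbitrary shot count):

        .. code-block:: python

            payload = awg.translate(sequence, shots=1024)
            awg.upload(payload)
            awg.play_sequence()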
""" import broadbean as bb from qibolab.pulses import ReadoutPulse from qcodes.instrument_drivers.tektronix.AWG70000A import AWG70000A # First create np arrays for each channel start = min(pulse.start for pulse in sequence) end = max(pulse.start + pulse.duration for pulse in sequence) t = np.arange(start * 1e-9 - self._pulse_buffer, end * 1e-9 + self._pulse_buffer, 1 / self._sampling_rate) wfm = np.zeros((self._nchannels, len(t))) for pulse in sequence: # Convert pulse timings from nanoseconds to seconds start = pulse.start * 1e-9 duration = pulse.duration * 1e-9 if isinstance(pulse, ReadoutPulse): # Readout IQ Signal i_ch = pulse.channel[0] q_ch = pulse.channel[1] phase = (self._channel_phase[i_ch] + pulse.phase, self._channel_phase[q_ch] + pulse.phase) i_wfm, q_wfm = square(t, start, duration, pulse.frequency, pulse.amplitude, phase) wfm[i_ch] += i_wfm wfm[q_ch] += q_wfm # ADC TTL wfm[4] = TTL(t, start + self._adc_delay , 10e-9, 1) # RO SW TTL wfm[5] = TTL(t, start + self._ro_delay, duration, 1) # QB SW TTL wfm[6] = TTL(t, start + self._qb_delay, duration, 1) else: if self._mode == MODE_MIXER: # Qubit IQ signal i_ch = pulse.channel[0] q_ch = pulse.channel[1] phase = (self._channel_phase[i_ch] + pulse.phase, self._channel_phase[q_ch] + pulse.phase) i_wfm, q_wfm = square(t, start, duration, pulse.frequency, pulse.amplitude, phase) wfm[i_ch] += i_wfm wfm[q_ch] += q_wfm else: qb_wfm = sine(t, start, duration, pulse.frequency, pulse.amplitude, pulse.phase) wfm[pulse.channel] += qb_wfm # Add waveform arrays to broadbean sequencing main_sequence = bb.Sequence() main_sequence.name = "MainSeq" main_sequence.setSR(self._sampling_rate) # Dummy waveform on repeat to create delay between shots dummy = np.zeros(len(t)) unit_delay = 1e-6 sample_delay = np.zeros(int(unit_delay * self._sampling_rate)) delay_wfm = bb.Element() for ch in range(1, 5): delay_wfm.addArray(ch, sample_delay, self._sampling_rate, m1=sample_delay, m2=sample_delay) # Add pulses into waveform waveform = bb.Element() waveform.addArray(1, wfm[0], self._sampling_rate, m1=wfm[4], m2=wfm[5]) waveform.addArray(2, wfm[1], self._sampling_rate, m1=dummy, m2=wfm[6]) waveform.addArray(3, wfm[2], self._sampling_rate, m1=dummy, m2=dummy) waveform.addArray(4, wfm[3], self._sampling_rate, m1=dummy, m2=dummy) # Add subsequence to hold pulse waveforms and delay waveform subseq = bb.Sequence() subseq.name = "SubSeq" subseq.setSR(self._sampling_rate) subseq.addElement(1, waveform) subseq.addElement(2, delay_wfm) subseq.setSequencingNumberOfRepetitions(2, int(self._sequence_delay / unit_delay)) # Add sequence to play subsequence up to the number of shots. main_sequence.addSubSequence(1, subseq) main_sequence.setSequencingTriggerWait(1, 1) main_sequence.setSequencingNumberOfRepetitions(1, shots) main_sequence.setSequencingGoto(1, 1) # Compile waveform into payload # TODO: On fresh installation, fix bug in AWG70000A driver with regards to this method. 
payload = main_sequence.forge(apply_delays=False, apply_filters=False) payload = AWG70000A.make_SEQX_from_forged_sequence(payload, self._amplitude, "MainSeq") return payload def upload(self, payload): """ Uploads the .seqx file to the AWG and loads it """ import time with open("//{}/Users/OEM/Documents/MainSeq.seqx".format(self._ip), "wb+") as w: w.write(payload) pathstr = 'C:\\Users\\OEM\\Documents\\MainSeq.seqx' self.write('MMEMory:OPEN:SASSet:SEQuence "{}"'.format(pathstr)) start = time.time() while True: elapsed = time.time() - start if int(self.query("*OPC?")) == 1: break elif elapsed > self._visa_handle.timeout: raise RuntimeError("AWG took too long to load waveforms") for ch in range(1, 5): self.write('SOURCE{}:CASSet:SEQuence "MainSeq", {}'.format(ch, ch)) self.ready() def play_sequence(self): """ Arms the AWG for playback on trigger A """ for ch in range(1, 5): self.write("OUTPut{}:STATe 1".format(ch)) self.write('SOURce{}:RMODe TRIGgered'.format(ch)) self.write('SOURce1{}TINPut ATRIGGER'.format(ch)) # Arm the trigger self.write('AWGControl:RUN:IMMediate') self.ready() def stop(self): """ Stops the AWG and turns off all channels """ self.write('AWGControl:STOP') for ch in range(1, 5): self.write("OUTPut{}:STATe 0".format(ch)) def start_experiment(self): """ Triggers the AWG to start playing """ self.write('TRIGger:IMMediate ATRigger') class MCAttenuator(Instrument): """Driver for the MiniCircuit RCDAT-8000-30 variable attenuator. """ def __init__(self, name, address): self.name = name self._address = address def setup(self, attenuation: float): """Assigns the attenuation level on the attenuator. Arguments: attenuation(float): Attenuation setting in dB. Ranges from 0 to 35. """ import urllib3 http = urllib3.PoolManager() http.request('GET', 'http://{}/SETATT={}'.format(self._address, attenuation)) class QuicSyn(VisaInstrument): """Driver for the National Instrument QuicSyn Lite local oscillator. """ def __init__(self, name, address): VisaInstrument.__init__(self) self.name = name self.connect(address) self.write('0601') # EXT REF def setup(self, frequency): """ Sets the frequency in Hz """ self.write('FREQ {0:f}Hz'.format(frequency)) def start(self): """Starts the instrument. """ self.write('0F01') def stop(self): """Stops the instrument. """ self.write('0F00') class AlazarADC(ATS.AcquisitionController, Instrument): """Driver for the AlazarTech ATS9371 ADC. """ def __init__(self, name="alz_cont", address="Alazar1", **kwargs): from qibolab.instruments.ATS9371 import AlazarTech_ATS9371 self.adc = AlazarTech_ATS9371(address) self.acquisitionkwargs = {} self.samples_per_record = None self.records_per_buffer = None self.buffers_per_acquisition = None self.results = None self.number_of_channels = 2 self.buffer = None self._samples = None self._thread = None self._processed_data = None super().__init__(name, address, **kwargs) self.add_parameter("acquisition", get_cmd=self.do_acquisition) def setup(self, samples): """Setup the ADC. Arguments: samples (int): Number of samples to be acquired. TODO: Set trigger voltage as a variable. 
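
        Example (a sketch; the sample and shot counts are placeholders and
        must respect the board's ``samples_per_record`` granularity):

        .. code-block:: python

            adc.setup(samples=4992)
            adc.arm(shots=1024)
            # ... play the pulse sequence that produces the triggers ...
            ampl, phase, i, q = adc.result(readout_frequency)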
""" trigger_volts = 1 input_range_volts = 2.5 trigger_level_code = int(128 + 127 * trigger_volts / input_range_volts) with self.adc.syncing(): self.adc.clock_source("EXTERNAL_CLOCK_10MHz_REF") #self.adc.clock_source("INTERNAL_CLOCK") self.adc.external_sample_rate(1_000_000_000) #self.adc.sample_rate(1_000_000_000) self.adc.clock_edge("CLOCK_EDGE_RISING") self.adc.decimation(1) self.adc.coupling1('DC') self.adc.coupling2('DC') self.adc.channel_range1(.02) #self.adc.channel_range2(.4) self.adc.channel_range2(.02) self.adc.impedance1(50) self.adc.impedance2(50) self.adc.bwlimit1("DISABLED") self.adc.bwlimit2("DISABLED") self.adc.trigger_operation('TRIG_ENGINE_OP_J') self.adc.trigger_engine1('TRIG_ENGINE_J') self.adc.trigger_source1('EXTERNAL') self.adc.trigger_slope1('TRIG_SLOPE_POSITIVE') self.adc.trigger_level1(trigger_level_code) self.adc.trigger_engine2('TRIG_ENGINE_K') self.adc.trigger_source2('DISABLE') self.adc.trigger_slope2('TRIG_SLOPE_POSITIVE') self.adc.trigger_level2(128) self.adc.external_trigger_coupling('DC') self.adc.external_trigger_range('ETR_2V5') self.adc.trigger_delay(0) #self.aux_io_mode('NONE') # AUX_IN_TRIGGER_ENABLE for seq mode on #self.aux_io_param('NONE') # TRIG_SLOPE_POSITIVE for seq mode on self.adc.timeout_ticks(0) self._samples = samples def update_acquisitionkwargs(self, **kwargs): """ This method must be used to update the kwargs used for the acquisition with the alazar_driver.acquire :param kwargs: :return: """ self.acquisitionkwargs.update(**kwargs) def arm(self, shots): """Arms the ADC for acqusition. Arguments: shots (int): Number of trigger signals to be expected. TODO: Wait for ADC to be ready for acquisition instead of fixed time duration. """ import threading import time self.update_acquisitionkwargs(mode='NPT', samples_per_record=self._samples, records_per_buffer=10, buffers_per_acquisition=int(shots / 10), allocated_buffers=100, buffer_timeout=10000) self.pre_start_capture() self._thread = threading.Thread(target=self.do_acquisition, args=()) self._thread.start() time.sleep(1) def pre_start_capture(self): self.samples_per_record = self.adc.samples_per_record.get() self.records_per_buffer = self.adc.records_per_buffer.get() self.buffers_per_acquisition = self.adc.buffers_per_acquisition.get() sample_speed = self.adc.get_sample_rate() t_final = self.samples_per_record / sample_speed self.time_array = np.arange(0, t_final, 1 / sample_speed) self.buffer = np.zeros(self.samples_per_record * self.records_per_buffer * self.number_of_channels) def pre_acquire(self): """ See AcquisitionController :return: """ # this could be used to start an Arbitrary Waveform Generator, etc... # using this method ensures that the contents are executed AFTER the # Alazar card starts listening for a trigger pulse pass def handle_buffer(self, data, buffer_number=None): """ See AcquisitionController :return: """ self.buffer += data def post_acquire(self): """ See AcquisitionController :return: """ def signal_to_volt(signal, voltdiv): u12 = signal / 16 #bitsPerSample = 12 codeZero = 2047.5 codeRange = codeZero return voltdiv * (u12 - codeZero) / codeRange records_per_acquisition = (1. 
* self.buffers_per_acquisition * self.records_per_buffer) recordA = np.zeros(self.samples_per_record) recordB = np.zeros(self.samples_per_record) # Interleaved samples for i in range(self.records_per_buffer): record_start = i * self.samples_per_record * 2 record_stop = record_start + self.samples_per_record * 2 record_slice = self.buffer[record_start:record_stop] recordA += record_slice[0::2] / records_per_acquisition recordB += record_slice[1::2] / records_per_acquisition recordA = signal_to_volt(recordA, 0.02) recordB = signal_to_volt(recordB, 0.02) self._processed_data = np.array([recordA, recordB]) return self.buffer, self.buffers_per_acquisition, self.records_per_buffer, self.samples_per_record, self.time_array def do_acquisition(self): """ this method performs an acquisition, which is the get_cmd for the acquisiion parameter of this instrument :return: """ self._get_alazar().acquire(acquisition_controller=self, **self.acquisitionkwargs) def result(self, readout_frequency): """Returns the processed signal result from the ADC. Arguments: readout_frequency (float): Frequency to be used for signal processing. Returns: ampl (float): Amplitude of the processed signal. phase (float): Phase shift of the processed signal in degrees. it (float): I component of the processed signal. qt (float): Q component of the processed signal. """ self._thread.join() # TODO: Pass ADC channel as arg instead of hardcoded channels input_vec_I = self._processed_data[0] input_vec_Q = self._processed_data[1] it = 0 qt = 0 for i in range(self.samples_per_record): it += input_vec_I[i] * np.cos(2 * np.pi * readout_frequency * self.time_array[i]) qt += input_vec_Q[i] * np.cos(2 * np.pi * readout_frequency * self.time_array[i]) phase = np.arctan2(qt, it) * 180 / np.pi ampl = np.sqrt(it**2 + qt**2) return ampl, phase, it, qt def close(self): """Closes the instrument. """ self._alazar.close() super().close() src/qibolab/instruments/__init__.py METASEP from qibolab.instruments.qblox import PulsarQRM, PulsarQCM from qibolab.instruments.rohde_schwarz import SGS100A from qibolab.instruments.icarusq import TektronixAWG5204, AlazarADC, MCAttenuator, QuicSyn src/qibolab/instruments/ATS9371.py METASEP """ Adapted from the qcodes ATS9373 driver """ from distutils.version import LooseVersion import numpy as np from qcodes.utils import validators from qcodes.instrument_drivers.AlazarTech.ATS import AlazarTech_ATS from qcodes.instrument_drivers.AlazarTech.utils import TraceParameter class AlazarTech_ATS9371(AlazarTech_ATS): """ This class is the driver for the ATS9373 board. Note that this board is very similar to ATS9360. Refer to ATS SDK for details. Note that channels of this board have 12-bit resolution (see `IDN()['bits_per_sample']`) which means that the raw data that is returned by the card should be converted to uint16 type with a bit shift by 4 bits. Refer to ATS SDK for more infromation. 
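
    A sketch of the 12-bit conversion mentioned above (``buffer_u16`` is a
    placeholder array, not part of the driver API)::

        import numpy as np
        codes_12bit = np.asarray(buffer_u16) >> 4  # drop the four padding bits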
""" samples_divisor = 128 _trigger_holdoff_min_fw_version = '30.04' def __init__(self, name, **kwargs): dll_path = 'C:\\WINDOWS\\System32\\ATSApi.dll' super().__init__(name, dll_path=dll_path, **kwargs) # add parameters # ----- Parameters for the configuration of the board ----- self.add_parameter(name='clock_source', parameter_class=TraceParameter, get_cmd=None, label='Clock Source', unit=None, initial_value='INTERNAL_CLOCK', val_mapping={'INTERNAL_CLOCK': 1, 'FAST_EXTERNAL_CLOCK': 2, 'EXTERNAL_CLOCK_10MHz_REF': 7}) self.add_parameter(name='external_sample_rate', get_cmd=None, parameter_class=TraceParameter, label='External Sample Rate', unit='S/s', vals=validators.MultiType(validators.Ints(300000000, 1000000000), validators.Enum('UNDEFINED')), initial_value='UNDEFINED') self.add_parameter(name='sample_rate', get_cmd=None, parameter_class=TraceParameter, label='Internal Sample Rate', unit='S/s', initial_value='UNDEFINED', val_mapping={1_000: 1, 2_000: 2, 5_000: 4, 10_000: 8, 20_000: 10, 50_000: 12, 100_000: 14, 200_000: 16, 500_000: 18, 1_000_000: 20, 2_000_000: 24, 5_000_000: 26, 10_000_000: 28, 20_000_000: 30, 25_000_000: 33, 50_000_000: 34, 100_000_000: 36, 125_000_000: 37, 160_000_000: 38, 180_000_000: 39, 200_000_000: 40, 250_000_000: 43, 500_000_000: 48, 800_000_000: 50, 1_000_000_000: 53, 'EXTERNAL_CLOCK': 64, 'UNDEFINED': 'UNDEFINED'}) self.add_parameter(name='clock_edge', get_cmd=None, parameter_class=TraceParameter, label='Clock Edge', unit=None, initial_value='CLOCK_EDGE_RISING', val_mapping={'CLOCK_EDGE_RISING': 0, 'CLOCK_EDGE_FALLING': 1}) self.add_parameter(name='decimation', get_cmd=None, parameter_class=TraceParameter, label='Decimation', unit=None, initial_value=1, vals=validators.Ints(0, 100000)) for i in ['1', '2']: self.add_parameter(name='coupling' + i, get_cmd=None, parameter_class=TraceParameter, label='Coupling channel ' + i, unit=None, initial_value='DC', val_mapping={'AC': 1, 'DC': 2}) self.add_parameter(name='channel_range' + i, get_cmd=None, parameter_class=TraceParameter, label='Range channel ' + i, unit='V', initial_value=0.4, val_mapping={0.02: 1, 0.04: 2, 0.05: 3, 0.08: 4, 0.1: 5, 0.2: 6, 0.4: 7, 0.5: 8, 0.8: 9 }) self.add_parameter(name='impedance' + i, get_cmd=None, parameter_class=TraceParameter, label='Impedance channel ' + i, unit='Ohm', initial_value=50, val_mapping={50: 2}) self.add_parameter(name='bwlimit' + i, get_cmd=None, parameter_class=TraceParameter, label='Bandwidth limit channel ' + i, unit=None, initial_value='DISABLED', val_mapping={'DISABLED': 0, 'ENABLED': 1}) self.add_parameter(name='trigger_operation', get_cmd=None, parameter_class=TraceParameter, label='Trigger Operation', unit=None, initial_value='TRIG_ENGINE_OP_J', val_mapping={'TRIG_ENGINE_OP_J': 0, 'TRIG_ENGINE_OP_K': 1, 'TRIG_ENGINE_OP_J_OR_K': 2, 'TRIG_ENGINE_OP_J_AND_K': 3, 'TRIG_ENGINE_OP_J_XOR_K': 4, 'TRIG_ENGINE_OP_J_AND_NOT_K': 5, 'TRIG_ENGINE_OP_NOT_J_AND_K': 6}) for i in ['1', '2']: self.add_parameter(name='trigger_engine' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Engine ' + i, unit=None, initial_value='TRIG_ENGINE_' + ('J' if i == '1' else 'K'), val_mapping={'TRIG_ENGINE_J': 0, 'TRIG_ENGINE_K': 1}) self.add_parameter(name='trigger_source' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Source ' + i, unit=None, initial_value='EXTERNAL', val_mapping={'CHANNEL_A': 0, 'CHANNEL_B': 1, 'EXTERNAL': 2, 'DISABLE': 3}) self.add_parameter(name='trigger_slope' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Slope ' + i, 
unit=None, initial_value='TRIG_SLOPE_POSITIVE', val_mapping={'TRIG_SLOPE_POSITIVE': 1, 'TRIG_SLOPE_NEGATIVE': 2}) self.add_parameter(name='trigger_level' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Level ' + i, unit=None, initial_value=140, vals=validators.Ints(0, 255)) self.add_parameter(name='external_trigger_coupling', get_cmd=None, parameter_class=TraceParameter, label='External Trigger Coupling', unit=None, initial_value='DC', val_mapping={'AC': 1,'DC': 2}) self.add_parameter(name='external_trigger_range', get_cmd=None, parameter_class=TraceParameter, label='External Trigger Range', unit=None, initial_value='ETR_2V5', val_mapping={'ETR_TTL': 2, 'ETR_2V5': 3}) self.add_parameter(name='trigger_delay', get_cmd=None, parameter_class=TraceParameter, label='Trigger Delay', unit='Sample clock cycles', initial_value=0, vals=validators.Multiples(divisor=8, min_value=0)) # See Table 3 - Trigger Delay Alignment # TODO: this is either 8 or 16 dependent on the number of channels in use # NOTE: The board will wait for a for this amount of time for a # trigger event. If a trigger event does not arrive, then the # board will automatically trigger. Set the trigger timeout value # to 0 to force the board to wait forever for a trigger event. # # IMPORTANT: The trigger timeout value should be set to zero after # appropriate trigger parameters have been determined, otherwise # the board may trigger if the timeout interval expires before a # hardware trigger event arrives. self.add_parameter(name='timeout_ticks', get_cmd=None, parameter_class=TraceParameter, label='Timeout Ticks', unit='10 us', initial_value=0, vals=validators.Ints(min_value=0)) self.add_parameter(name='aux_io_mode', get_cmd=None, parameter_class=TraceParameter, label='AUX I/O Mode', unit=None, initial_value='AUX_IN_AUXILIARY', val_mapping={'AUX_OUT_TRIGGER': 0, 'AUX_IN_TRIGGER_ENABLE': 1, 'AUX_IN_AUXILIARY': 13}) self.add_parameter(name='aux_io_param', get_cmd=None, parameter_class=TraceParameter, label='AUX I/O Param', unit=None, initial_value='NONE', val_mapping={'NONE': 0, 'TRIG_SLOPE_POSITIVE': 1, 'TRIG_SLOPE_NEGATIVE': 2}) # ----- Parameters for the acquire function ----- self.add_parameter(name='mode', label='Acquisition mode', unit=None, initial_value='NPT', get_cmd=None, set_cmd=None, val_mapping={'NPT': 0x200, 'TS': 0x400}) self.add_parameter(name='samples_per_record', label='Samples per Record', unit=None, initial_value=1024, get_cmd=None, set_cmd=None, vals=validators.Multiples( divisor=self.samples_divisor, min_value=256)) self.add_parameter(name='records_per_buffer', label='Records per Buffer', unit=None, initial_value=10, get_cmd=None, set_cmd=None, vals=validators.Ints(min_value=0)) self.add_parameter(name='buffers_per_acquisition', label='Buffers per Acquisition', unit=None, get_cmd=None, set_cmd=None, initial_value=10, vals=validators.Ints(min_value=0)) self.add_parameter(name='channel_selection', label='Channel Selection', unit=None, get_cmd=None, set_cmd=None, initial_value='AB', val_mapping={'A': 1, 'B': 2, 'AB': 3}) self.add_parameter(name='transfer_offset', label='Transfer Offset', unit='Samples', get_cmd=None, set_cmd=None, initial_value=0, vals=validators.Ints(min_value=0)) self.add_parameter(name='external_startcapture', label='External Startcapture', unit=None, get_cmd=None, set_cmd=None, initial_value='ENABLED', val_mapping={'DISABLED': 0X0, 'ENABLED': 0x1}) self.add_parameter(name='enable_record_headers', label='Enable Record Headers', unit=None, get_cmd=None, set_cmd=None, 
initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x8}) self.add_parameter(name='alloc_buffers', label='Alloc Buffers', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x20}) self.add_parameter(name='fifo_only_streaming', label='Fifo Only Streaming', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x800}) self.add_parameter(name='interleave_samples', label='Interleave Samples', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x1000}) self.add_parameter(name='get_processed_data', label='Get Processed Data', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x2000}) self.add_parameter(name='allocated_buffers', label='Allocated Buffers', unit=None, get_cmd=None, set_cmd=None, initial_value=4, vals=validators.Ints(min_value=0)) self.add_parameter(name='buffer_timeout', label='Buffer Timeout', unit='ms', get_cmd=None, set_cmd=None, initial_value=1000, vals=validators.Ints(min_value=0)) self.add_parameter(name='trigger_holdoff', label='Trigger Holdoff', docstring=f'If enabled Alazar will ' f'ignore any additional triggers ' f'while capturing a record. If disabled ' f'this will result in corrupt data. ' f'Support for this requires at least ' f'firmware version ' f'{self._trigger_holdoff_min_fw_version}', vals=validators.Bool(), get_cmd=self._get_trigger_holdoff, set_cmd=self._set_trigger_holdoff) model = self.get_idn()['model'] if model != 'ATS9371': raise Exception("The Alazar board kind is not 'ATS9371'," " found '" + str(model) + "' instead.") def _get_trigger_holdoff(self) -> bool: fwversion = self.get_idn()['firmware'] if LooseVersion(fwversion) < \ LooseVersion(self._trigger_holdoff_min_fw_version): return False # we want to check if the 26h bit (zero indexed) is high or not output = np.uint32(self._read_register(58)) # the two first two chars in the bit string is the sign and a 'b' # remove those to only get the bit pattern bitmask = bin(output)[2:] # all prefixed zeros are ignored in the bit conversion so the # bit mask may be shorter than what we expect. in that case # the bit we care about is zero so we return False if len(bitmask) < 27: return False return bool(bin(output)[-27]) def _set_trigger_holdoff(self, value: bool) -> None: fwversion = self.get_idn()['firmware'] if LooseVersion(fwversion) < \ LooseVersion(self._trigger_holdoff_min_fw_version): raise RuntimeError(f"Alazar 9360 requires at least firmware " f"version {self._trigger_holdoff_min_fw_version}" f" for trigger holdoff support. " f"You have version {fwversion}") current_value = self._read_register(58) if value is True: # to enable trigger hold off we want to flip the # 26th bit to 1. We do that by making a bitwise or # with a number that has a 1 on the 26th place and zero # otherwise. We use numpy.unit32 instead of python numbers # to have unsigned ints of the right size enable_mask = np.uint32(1 << 26) new_value = current_value | enable_mask else: # to disable trigger hold off we want to flip the # 26th bit to 0. 
We do that by making a bitwise and # with a number that has a 0 on the 26th place and 1 # otherwise disable_mask = ~np.uint32(1 << 26) # pylint: disable=E1130 new_value = current_value & disable_mask self._write_register(58, new_value) src/qibolab/states.py METASEP from qibo import K from qibo.abstractions.states import AbstractState from qibo.config import raise_error class HardwareState(AbstractState): def __init__(self, nqubits=None): if nqubits > 1: raise_error(NotImplementedError, "Hardware device has one qubit.") super().__init__(nqubits) self.readout = None self.normalized_voltage = None self.min_voltage = None self.max_voltage = None @property def shape(self): # pragma: no cover raise_error(NotImplementedError) @property def dtype(self): # pragma: no cover raise_error(NotImplementedError) def symbolic(self, decimals=5, cutoff=1e-10, max_terms=20): # pragma: no cover raise_error(NotImplementedError) def __array__(self): # pragma: no cover raise_error(NotImplementedError) def numpy(self): # pragma: no cover raise_error(NotImplementedError) def state(self, numpy=False, decimals=-1, cutoff=1e-10, max_terms=20): raise_error(NotImplementedError) @classmethod def from_readout(cls, readout, min_voltage, max_voltage): state = cls(1) state.readout = readout state.min_voltage = min_voltage state.max_voltage = max_voltage norm = max_voltage - min_voltage state.normalized_voltage = (readout[0] * 1e6 - min_voltage) / norm return state @classmethod def zero_state(cls, nqubits): # pragma: no cover raise_error(NotImplementedError) @classmethod def plus_state(cls, nqubits): # pragma: no cover raise_error(NotImplementedError) def copy(self, min_voltage=None, max_voltage=None): new = super().copy() new.readout = self.readout if min_voltage is not None: new.min_voltage = min_voltage else: new.min_voltage = self.min_voltage if max_voltage is not None: new.max_voltage = max_voltage else: new.max_voltage = self.max_voltage norm = new.max_voltage - new.min_voltage new.normalized_voltage = (new.readout[0] * 1e6 - new.min_voltage) / norm return new def to_density_matrix(self): # pragma: no cover raise_error(NotImplementedError) def probabilities(self, qubits=None, measurement_gate=None): p = self.normalized_voltage return K.cast([p, 1 - p], dtype="DTYPE") def measure(self, gate, nshots, registers=None): # pragma: no cover raise_error(NotImplementedError) def set_measurements(self, qubits, samples, registers=None): # pragma: no cover raise_error(NotImplementedError) def samples(self, binary=True, registers=False): # pragma: no cover raise_error(NotImplementedError) def frequencies(self, binary=True, registers=False): # pragma: no cover raise_error(NotImplementedError) def apply_bitflips(self, p0, p1=None): # pragma: no cover raise_error(NotImplementedError, "Noise simulation is not required for hardware.") def expectation(self, hamiltonian, normalize=False): # pragma: no cover # FIXME: This is the expectation value of <Z> only! return 2 * self.probabilities()[0] - 1 src/qibolab/pulses.py METASEP """Pulse abstractions.""" import bisect import numpy as np class Pulse: """Describes a single pulse to be added to waveform array. Args: start (float): Start time of pulse in ns. duration (float): Pulse duration in ns. amplitude (float): Pulse digital amplitude (unitless) [0 to 1]. frequency (float): Pulse Intermediate Frequency in Hz [10e6 to 300e6]. phase (float): To be added. shape: (PulseShape): Pulse shape. See :py:mod:`qibolab.pulses_shapes` for list of available shapes. 
offset_i (float): Optional pulse I offset (unitless). (amplitude + offset) should be between [0 and 1]. offset_q (float): Optional pulse Q offset (unitless). (amplitude + offset) should be between [0 and 1]. channel (int/str): Specifies the device that will execute this pulse. FPGA channel (int) for IcarusQ or qrm/qcm (str) for TIIq. qubit (int): Target qubit ID Example: .. code-block:: python from qibolab.pulses import Pulse from qibolab.pulse_shapes import Gaussian # define pulse with Gaussian shape pulse = Pulse(start=0, frequency=200000000.0, amplitude=0.3, duration=60, phase=0, shape=Gaussian(60 / 5)) """ def __init__(self, start, duration, amplitude, frequency, phase, shape, offset_i=0, offset_q=0, channel="qcm", qubit=0): # FIXME: Since the ``start`` value depends on the previous pulses we are # not sure if it should be a local property of the ``Pulse`` object self.start = start self.duration = duration self.amplitude = amplitude self.frequency = frequency self.phase = phase self.shape = shape # PulseShape objects self.channel = channel self.offset_i = offset_i self.offset_q = offset_q self.qubit = qubit def serial(self): return "P({}, {}, {}, {}, {}, {}, {})".format(self.channel, self.start, self.duration, self.amplitude, self.frequency, self.phase, self.shape) ### IcarusQ specific method ### #def compile(self, waveform, sequence): # i_start = bisect.bisect(sequence.time, self.start) # #i_start = int((self.start / sequence.duration) * sequence.sample_size) # i_duration = int((self.duration / sequence.duration) * sequence.sample_size) # time = sequence.time[i_start:i_start + i_duration] # envelope = self.shape.envelope(time, self.start, self.duration, self.amplitude) # waveform[self.channel, i_start:i_start + i_duration] += ( # envelope * np.sin(2 * np.pi * self.frequency * time + self.phase)) # return waveform def compile(self): return self.shape.envelope(None, None, self.duration, self.amplitude) def __repr__(self): return self.serial() class ReadoutPulse(Pulse): """Describes a readout pulse. See :class:`qibolab.pulses.Pulse` for argument desciption. """ def __init__(self, start, duration, amplitude, frequency, phase, shape, offset_i=0, offset_q=0, channel="qrm", qubit=0): super().__init__(start, duration, amplitude, frequency, phase, shape, offset_i, offset_q, channel, qubit) class IQReadoutPulse(Pulse): # TODO: Remove this or think how to merge with ``ReadoutPulse``. # Currently keeping it for compatibility with IcarusQ as it breaks the import """ Describes a pair of IQ pulses for the readout Args: channels (int): Pair of FPGA channels to play pulses on. start (float): Start time of pulse in seconds. duration (float): Pulse duration in seconds. amplitude (float): Pulse amplitude in volts. frequency (float): Pulse frequency in Hz. phases (float): Pulse phase offset for mixer sideband. 
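        When compiled, the first channel receives a cosine and the second a
        negated sine at the pulse frequency, so the pair forms the I and Q
        components fed to the mixer (see ``compile``).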
""" def __init__(self, channels, start, duration, amplitude, frequency, phases): self.channels = channels self.start = start self.duration = duration self.amplitude = amplitude self.frequency = frequency self.phases = phases def serial(self): return "" def compile(self, waveform, sequence): i_start = bisect.bisect(sequence.time, self.start) #i_start = int((self.start / sequence.duration) * sequence.sample_size) i_duration = int((self.duration / sequence.duration) * sequence.sample_size) time = sequence.time[i_start:i_start + i_duration] waveform[self.channels[0], i_start:i_start + i_duration] += self.amplitude * np.cos(2 * np.pi * self.frequency * time + self.phases[0]) waveform[self.channels[1], i_start:i_start + i_duration] -= self.amplitude * np.sin(2 * np.pi * self.frequency * time + self.phases[1]) return waveform class MultifrequencyPulse(Pulse): """Describes multiple pulses to be added to waveform array. Used when multiple pulses are overlapping to avoid overwrite. """ def __init__(self, members): self.members = members def serial(self): return "M({})".format(", ".join([m.serial() for m in self.members])) def compile(self, waveform, sequence): for member in self.members: waveform += member.compile(waveform, sequence) return waveform class FilePulse(Pulse): """Commands the FPGA to load a file as a waveform array in the specified channel.""" def __init__(self, channel, start, filename): self.channel = channel self.start = start self.filename = filename def serial(self): return "F({}, {}, {})".format(self.channel, self.start, self.filename) def compile(self, waveform, sequence): # `FilePulse` cannot be tested in CI because a file is not available i_start = int((self.start / sequence.duration) * sequence.sample_size) arr = np.genfromtxt(sequence.file_dir, delimiter=',')[:-1] waveform[self.channel, i_start:i_start + len(arr)] = arr return waveform src/qibolab/pulse_shapes.py METASEP import numpy as np from abc import ABC, abstractmethod from qibo.config import raise_error class PulseShape(ABC): """Describes the pulse shape to be used.""" def __init__(self): # pragma: no cover self.name = "" @abstractmethod def envelope(self, time, start, duration, amplitude): # pragma: no cover raise_error(NotImplementedError) def __repr__(self): return "({})".format(self.name) class Rectangular(PulseShape): """Rectangular/square pulse shape.""" def __init__(self): self.name = "rectangular" def envelope(self, time, start, duration, amplitude): """Constant amplitude envelope.""" #return amplitude # FIXME: This may have broken IcarusQ return amplitude * np.ones(int(duration)) class Gaussian(PulseShape): """Gaussian pulse shape""" def __init__(self, sigma): self.name = "gaussian" self.sigma = sigma def envelope(self, time, start, duration, amplitude): """Gaussian envelope centered with respect to the pulse. .. math:: A\exp^{-\\frac{1}{2}\\frac{(t-\mu)^2}{\sigma^2}} """ from scipy.signal import gaussian return amplitude * gaussian(int(duration), std=self.sigma) # FIXME: This may have broken IcarusQ #mu = start + duration / 2 #return amplitude * np.exp(-0.5 * (time - mu) ** 2 / self.sigma ** 2) def __repr__(self): return "({}, {})".format(self.name, self.sigma) class Drag(PulseShape): """Derivative Removal by Adiabatic Gate (DRAG) pulse shape.""" def __init__(self, sigma, beta): self.name = "drag" self.sigma = sigma self.beta = beta def envelope(self, time, start, duration, amplitude): """DRAG envelope centered with respect to the pulse. .. math:: G + i\\beta(-\\frac{t-\mu}{\sigma^2})G where .. 
math:: G = A\exp^{-\\frac{1}{2}\\frac{(t-\mu)^2}{\sigma^2}} """ mu = start + duration / 2 gaussian = amplitude * np.exp(-0.5 * (time - mu) ** 2 / self.sigma ** 2) return gaussian + 1j * self.beta * (-(time - mu) / self.sigma ** 2) * gaussian def __repr__(self): return "({}, {}, {})".format(self.name, self.sigma, self.beta) class SWIPHT(PulseShape): """Speeding up Wave forms by Inducing Phase to Harmful Transitions pulse shape.""" def __init__(self, g): self.name = "SWIPHT" self.g = g def envelope(self, time, start, duration, amplitude): ki_qq = self.g * np.pi t_g = 5.87 / (2 * abs(ki_qq)) t = np.linspace(0, t_g, len(time)) gamma = 138.9 * (t / t_g)**4 *(1 - t / t_g)**4 + np.pi / 4 gamma_1st = 4 * 138.9 * (t / t_g)**3 * (1 - t / t_g)**3 * (1 / t_g - 2 * t / t_g**2) gamma_2nd = 4*138.9*(t / t_g)**2 * (1 - t / t_g)**2 * (14*(t / t_g**2)**2 - 14*(t / t_g**3) + 3 / t_g**2) omega = gamma_2nd / np.sqrt(ki_qq**2 - gamma_1st**2) - 2*np.sqrt(ki_qq**2 - gamma_1st**2) * 1 / np.tan(2 * gamma) omega = omega / max(omega) return omega * amplitude def __repr__(self): return "({}, {})".format(self.name, self.g) src/qibolab/platform.py METASEP import pathlib from qibo.config import raise_error def Platform(name, runcard=None): """Platform for controlling quantum devices. Args: name (str): name of the platform. Options are 'tiiq', 'qili' and 'icarusq'. runcard (str): path to the yaml file containing the platform setup. Returns: The plaform class. """ if not runcard: runcard = pathlib.Path(__file__).parent / "runcards" / f"{name}.yml" if name == 'tiiq' or name == 'qili': from qibolab.platforms.qbloxplatform import QBloxPlatform as Device elif name == 'icarusq': from qibolab.platforms.icplatform import ICPlatform as Device else: raise_error(RuntimeError, f"Platform {name} is not supported.") return Device(name, runcard) src/qibolab/gates.py METASEP import sys import math import copy from abc import ABC, abstractmethod from qibo import gates from qibo.config import raise_error class AbstractHardwareGate(ABC): module = sys.modules[__name__] @abstractmethod def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) @abstractmethod def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) @abstractmethod def to_sequence(self, sequence): # pragma: no cover """Adds the pulses implementing the gate to the given ``PulseSequence``.""" raise_error(NotImplementedError) class H(AbstractHardwareGate, gates.H): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): q = self.target_qubits[0] composite = [RY(q, math.pi / 2), RX(q, math.pi)] pulses = [] for gate in composite: pulses.extend(gate.pulse_sequence(qubit_config, qubit_times, qubit_phases)) return pulses def duration(self, qubit_config): d = 0 q = self.target_qubits[0] composite = [RY(q, math.pi / 2), RX(q, math.pi)] for gate in composite: d += gate.duration(qubit_config) return d def to_sequence(self, sequence): q = self.target_qubits[0] sequence.add_u3(7 * math.pi / 2, math.pi, 0, q) class I(AbstractHardwareGate, gates.I): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): return [] def duration(self, qubit_config): return 0 def to_sequence(self, sequence): pass class Align(AbstractHardwareGate, gates.I): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): m = max(qubit_times[q] for q in self.target_qubits) for q in self.target_qubits: qubit_times[q] = m return [] def duration(self, qubit_config): return 0 def to_sequence(self, 
sequence): raise_error(NotImplementedError) class M(AbstractHardwareGate, gates.M): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): pulses = [] for q in self.target_qubits: pulses += copy.deepcopy(qubit_config[q].gates.get(self)) return pulses def duration(self, qubit_config): pulses = [] for q in self.target_qubits: pulses += copy.deepcopy(qubit_config[q].gates.get(self)) m = 0 for p in pulses: m = max(p.duration, m) return m def to_sequence(self, sequence): for q in self.target_qubits: sequence.add_measurement(q) class RX(AbstractHardwareGate, gates.RX): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): if self.parameters == 0: return [] q = self.target_qubits[0] time_mod = abs(self.parameters / math.pi) phase_mod = 0 if self.parameters > 0 else -180 phase_mod += qubit_phases[q] m = 0 pulses = copy.deepcopy(qubit_config[q].gates.get(self)) for p in pulses: duration = p.duration * time_mod p.start = qubit_times[q] p.phase += phase_mod p.duration = duration m = max(duration, m) qubit_times[q] += m return pulses def duration(self, qubit_config): q = self.target_qubits[0] time_mod = abs(self.parameters / math.pi) pulses = copy.deepcopy(qubit_config[q].gates.get(self)) m = 0 for p in pulses: m = max(p.duration * time_mod, m) return m def to_sequence(self, sequence): q = self.target_qubits[0] theta = self.parameters phi = - math.pi / 2 lam = math.pi / 2 sequence.add_u3(theta, phi, lam, q) class RY(AbstractHardwareGate, gates.RY): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): return RX.pulse_sequence(self, qubit_config, qubit_times, qubit_phases) def duration(self, qubit_config): return RX.duration(self, qubit_config) def to_sequence(self, sequence): q = self.target_qubits[0] theta = self.parameters phi = 0 lam = 0 sequence.add_u3(theta, phi, lam, q) class RZ(AbstractHardwareGate, gates.RZ): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): # apply virtually by changing ``phase`` instead of using pulses sequence.phase += self.parameters #theta = 0 #phi = self.parameters / 2 #lam = self.parameters / 2 #return sequence.add_u3(theta, phi, lam) class CNOT(AbstractHardwareGate, gates.CNOT): # CNOT gate is not tested because `qubit_config` placeholder is single qubit def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): q = self.target_qubits[0] control = self.control_qubits[0] start = max(qubit_times[q], qubit_times[control]) pulses = copy.deepcopy(qubit_config[q].gates.get(self)) for p in pulses: duration = p.duration p.start = start p.phase = qubit_phases[q] p.duration = duration qubit_times[q] = start + duration qubit_times[control] = qubit_times[q] return pulses def duration(self, qubit_config): q = self.target_qubits[0] control = self.control_qubits[0] m = 0 pulses = qubit_config[q]["gates"][self.name + "_{}".format(control)] for p in pulses: m = max(p.duration, m) return m def to_sequence(self, sequence): raise_error(NotImplementedError) class U2(AbstractHardwareGate, gates.U2): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): args = (math.pi / 2,) + self.parameters sequence.add_u3(*args) class U3(AbstractHardwareGate, gates.U3): def pulse_sequence(self, 
qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(*self.parameters) class X(AbstractHardwareGate, gates.X): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(math.pi, 0, math.pi) class Y(AbstractHardwareGate, gates.Y): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(math.pi, 0, 0) class Z(AbstractHardwareGate, gates.Z): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(0, math.pi, 0) src/qibolab/circuit.py METASEP from qibo import K from qibolab import states, pulses from qibo.config import raise_error from qibo.core import circuit import numpy as np class PulseSequence: """List of pulses. Holds a separate list for each instrument. """ def __init__(self): super().__init__() self.qcm_pulses = [] self.qrm_pulses = [] self.time = 0 self.phase = 0 self.pulses = [] def add(self, pulse): """Add a pulse to the sequence. Args: pulse (:class:`qibolab.pulses.Pulse`): Pulse object to add. Example: .. code-block:: python from qibolab.pulses import Pulse, ReadoutPulse from qibolab.circuit import PulseSequence from qibolab.pulse_shapes import Rectangular, Gaussian # define two arbitrary pulses pulse1 = Pulse(start=0, frequency=200000000.0, amplitude=0.3, duration=60, phase=0, shape=Gaussian(60 / 5))) pulse2 = ReadoutPulse(start=70, frequency=20000000.0, amplitude=0.5, duration=3000, phase=0, shape=Rectangular())) # define the pulse sequence sequence = PulseSequence() # add pulses to the pulse sequence sequence.add(pulse1) sequence.add(pulse2) """ if pulse.channel == "qrm" or pulse.channel == 1: self.qrm_pulses.append(pulse) else: self.qcm_pulses.append(pulse) self.pulses.append(pulse) def add_u3(self, theta, phi, lam, qubit=0): """Add pulses that implement a U3 gate. Args: theta, phi, lam (float): Parameters of the U3 gate. 
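            qubit (int): Target qubit ID (defaults to 0).

        Example:
            A minimal sketch, assuming an active hardware platform is set in the
            qibo backend (``add_u3`` fetches the pi-pulse amplitude, duration and
            frequency from ``K.platform``):

            .. code-block:: python

                import math
                from qibolab.circuit import PulseSequence

                sequence = PulseSequence()
                # an RX(theta) gate is scheduled as U3(theta, -pi/2, pi/2)
                sequence.add_u3(math.pi, -math.pi / 2, math.pi / 2)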
""" from qibolab.pulse_shapes import Gaussian # Pi/2 pulse from calibration if hasattr(K.platform, "qubits"): kwargs = K.platform.fetch_qubit_pi_pulse(qubit) else: kwargs = { "amplitude": K.platform.pi_pulse_amplitude, "duration": K.platform.pi_pulse_duration, "frequency": K.platform.pi_pulse_frequency } kwargs["duration"] = kwargs["duration"] // 2 delay = K.platform.delay_between_pulses duration = kwargs.get("duration") kwargs["shape"] = Gaussian(duration / 5) self.phase += phi - np.pi / 2 kwargs["start"] = self.time kwargs["phase"] = self.phase self.add(pulses.Pulse(**kwargs)) self.time += duration + delay self.phase += np.pi - theta kwargs["start"] = self.time kwargs["phase"] = self.phase self.add(pulses.Pulse(**kwargs)) self.time += duration + delay self.phase += lam - np.pi / 2 def add_measurement(self, qubit=0): """Add measurement pulse.""" from qibolab.pulse_shapes import Rectangular if hasattr(K.platform, "qubits"): kwargs = K.platform.fetch_qubit_readout_pulse(qubit) else: kwargs = K.platform.readout_pulse kwargs["start"] = self.time + K.platform.delay_before_readout kwargs["phase"] = self.phase kwargs["shape"] = Rectangular() self.add(pulses.ReadoutPulse(**kwargs)) class HardwareCircuit(circuit.Circuit): def __init__(self, nqubits): if nqubits > 1: raise ValueError("Device has only one qubit.") super().__init__(nqubits) def execute(self, initial_state=None, nshots=None): if initial_state is not None: raise_error(ValueError, "Hardware backend does not support " "initial state in circuits.") if self.measurement_gate is None: raise_error(RuntimeError, "No measurement register assigned.") # Translate gates to pulses and create a ``PulseSequence`` sequence = PulseSequence() for gate in self.queue: gate.to_sequence(sequence) self.measurement_gate.to_sequence(sequence) # Execute the pulse sequence on the platform K.platform.connect() K.platform.setup() K.platform.start() readout = K.platform(sequence, nshots) K.platform.stop() if hasattr(K.platform, "qubits"): q = self.measurement_gate.target_qubits[0] qubit = K.platform.fetch_qubit(q) min_v = qubit.min_readout_voltage max_v = qubit.max_readout_voltage else: min_v = K.platform.min_readout_voltage max_v = K.platform.max_readout_voltage return states.HardwareState.from_readout(readout, min_v, max_v) src/qibolab/backend.py METASEP import os from qibolab.platform import Platform from qibo.backends.numpy import NumpyBackend from qibo.config import raise_error class QibolabBackend(NumpyBackend): # pragma: no cover description = "" # TODO: Write proper description def __init__(self): super().__init__() self.name = "qibolab" self.custom_gates = True self.is_hardware = True self.platform = self.set_platform(os.environ.get("QIBOLAB_PLATFORM", "tiiq")) def set_platform(self, platform): self.platform = Platform(platform) def get_platform(self): return self.platform.name def circuit_class(self, accelerators=None, density_matrix=False): if accelerators is not None: raise_error(NotImplementedError, "Hardware backend does not support " "multi-GPU configuration.") if density_matrix: raise_error(NotImplementedError, "Hardware backend does not support " "density matrix simulation.") from qibolab.circuit import HardwareCircuit return HardwareCircuit def create_gate(self, cls, *args, **kwargs): from qibolab import gates return getattr(gates, cls.__name__)(*args, **kwargs) def create_einsum_cache(self, qubits, nqubits, ncontrol=None): # pragma: no cover raise_error(NotImplementedError, "`create_einsum_cache` method is " "not required for hardware backends.") 
def einsum_call(self, cache, state, matrix): # pragma: no cover raise_error(NotImplementedError, "`einsum_call` method is not required " "for hardware backends.") src/qibolab/__init__.py METASEP __version__ = "0.0.1.dev1" from qibolab.platform import Platform doc/source/conf.py METASEP # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('.')) import qibolab # -- Project information ----------------------------------------------------- project = 'qibolab' copyright = '2021, The Qibo team' author = 'The Qibo team' release = qibolab.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found # master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'recommonmark', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = [] examples/tii_single_qubit/main.py METASEP import yaml import time from diagnostics import run_resonator_spectroscopy, \ run_qubit_spectroscopy, \ run_rabi_pulse_length, \ run_rabi_pulse_gain, \ run_rabi_pulse_length_and_gain, \ run_rabi_pulse_length_and_amplitude, \ run_t1, \ run_ramsey, \ run_spin_echo if __name__ == "__main__": with open("settings.yaml", "r") as file: settings = yaml.safe_load(file) resonator_freq = 7798070000.0 qubit_freq = 8726500000.0 pi_pulse_length = 45 pi_pulse_gain = 0.14 pi_pulse_amplitude = 0.9 print("\nRun resonator spectroscopy.\n") resonator_freq, _ = run_resonator_spectroscopy(**settings["resonator_spectroscopy"]) print("\nRun qubit spectroscopy.\n") qubit_freq, _ = run_qubit_spectroscopy(resonator_freq, **settings["qubit_spectroscopy"]) print("\nRun Rabi pulse length.\n") run_rabi_pulse_length(resonator_freq, qubit_freq) print("\nRun Rabi pulse gain.\n") run_rabi_pulse_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and gain.\n") run_rabi_pulse_length_and_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and amplitude.\n") run_rabi_pulse_length_and_amplitude(resonator_freq, qubit_freq) print("\nRun t1.\n") run_t1(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, **settings["t1"]) print("\nRun ramsey.\n") run_ramsey(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["ramsey"]) print("\nRun Spin Echo.\n") run_spin_echo(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["spin_echo"]) print("\nDiagnostics completed.\n") time.sleep(360) examples/tii_single_qubit/fitting.py METASEP import pathlib import numpy as np from scipy.optimize import curve_fit import matplotlib.pyplot as plt import os from quantify_core.analysis.base_analysis import BaseAnalysis from quantify_core.data.handling import set_datadir import lmfit import numpy as np def lorentzian_fit(label, peak): #label = directory where hdf5 data file generated by MC is located. #label=last --> Read most recent hdf5 #label=/path/to/directory/ --> read the hdf5 data file contained in "label" voltage, x_axis, data, d = data_post(label) frequency = x_axis #Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(resonator_peak) #Guess parameters for Lorentzian max or min #to guess center if peak == max: guess_center = frequency[np.argmax(voltage)] #Argmax = Returns the indices of the maximum values along an axis. else: guess_center = frequency[np.argmin(voltage)] #Argmin = Returns the indices of the minimum values along an axis. 
#to guess the sigma if peak == max: voltage_min_i = np.argmin(voltage) frequency_voltage_min = frequency[voltage_min_i] guess_sigma = abs(frequency_voltage_min - guess_center) #500KHz*1e-9 else: guess_sigma = 5e-03 #500KHz*1e-9 #to guess the amplitude if peak == max: voltage_max = np.max(voltage) guess_amp = voltage_max*guess_sigma*np.pi else: voltage_min = np.min(voltage) guess_amp = -voltage_min*guess_sigma*np.pi #to guess the offset if peak == max: guess_offset = 0 else: guess_offset = voltage[0]*-2.5*1e5 #Add guessed parameters to the model if peak == max: model_Q.set_param_hint('center',value=guess_center,vary=True) else: model_Q.set_param_hint('center',value=guess_center,vary=False) model_Q.set_param_hint('sigma',value=guess_sigma, vary=True) model_Q.set_param_hint('amplitude',value=guess_amp, vary=True) model_Q.set_param_hint('offset',value=guess_offset, vary=True) guess_parameters = model_Q.make_params() guess_parameters #fit the model with the data and guessed parameters fit_res = model_Q.fit(data=voltage,frequency=frequency,params=guess_parameters) #print(fit_res.fit_report()) #fit_res.best_values #get the values for postprocessing and for legend. f0 = fit_res.best_values['center']/1e9 BW = (fit_res.best_values['sigma']*2)/1e9 Q = abs(f0/BW) #plot the fitted curve dummy_frequencies = np.linspace(np.amin(frequency),np.amax(frequency),101) fit_fine = resonator_peak(dummy_frequencies,**fit_res.best_values) fig,ax = plt.subplots(1,1,figsize=(8,3)) ax.plot(data.x0,data.y0*1e3,'o',label='Data') ax.plot(dummy_frequencies,fit_fine*1e3,'r-', label=r"Fit $f_0$ ={:.4f} GHz" "\n" " $Q$ ={:.0f}".format(f0,Q)) ax.set_ylabel('Integrated Voltage (mV)') ax.set_xlabel('Frequency (GHz)') ax.legend() plt.show() fig.savefig(pathlib.Path("data") / 'Resonator_Spectroscopy.pdf',format='pdf') #fit_res.plot_fit(show_init=True) return f0, BW, Q def rabi_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(rabi, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = rabi(dataset['x0'].values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) rabi_oscillations_pi_pulse_min_voltage = smooth_dataset.min() * 1e6 t1 = 1.0 / popt[4] #double check T1 return smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 def t1_fit(dataset): pguess = [ max(dataset['y0'].values), (max(dataset['y0'].values) - min(dataset['y0'].values)), 1/250 ] popt, pcov = curve_fit(exp, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = exp(dataset['x0'].values, *popt) t1 = abs(1/popt[2]) return smooth_dataset, t1 def ramsey_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(ramsey, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = ramsey(dataset['x0'].values, *popt) delta_frequency = popt[2] t2 = 1.0 / popt[4] return smooth_dataset, delta_frequency, t2 def resonator_peak(frequency,amplitude,center,sigma,offset): #http://openafox.com/science/peak-function-derivations.html return (amplitude/np.pi) * (sigma/((frequency-center)**2 + sigma**2) + offset) def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary 
parameter T_2 : 1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def exp(x,*p) : return p[0] - p[1]*np.exp(-1 * x * p[2]) #Read last hdf5 file generated by the mc or specify the directory def data_post(dir = "last"): if dir == "last": #get last measured file directory = 'data/quantify' directory = max([subdir for subdir, dirs, files in os.walk(directory)], key=os.path.getmtime) label = os.path.basename(os.path.normpath(directory)) else: label = dir set_datadir('data/quantify') d = BaseAnalysis(tuid=label) d.run() data = d.dataset # #clean the array arr1 = data.y0; voltage = [None] * len(arr1); for i in range(0, len(arr1)): voltage[i] = float(arr1[i]); arr1 = data.x0; x_axis = [None] * len(arr1); for i in range(0, len(arr1)): x_axis[i] = float(arr1[i]); plt.plot(x_axis,voltage) #plt.show() return voltage, x_axis, data, d examples/tii_single_qubit/diagnostics.py METASEP import pathlib import numpy as np import matplotlib.pyplot as plt import yaml # TODO: Have a look in the documentation of ``MeasurementControl`` from quantify_core.measurement import MeasurementControl from quantify_core.measurement.control import Gettable, Settable from quantify_core.data.handling import set_datadir from scipy.signal import savgol_filter # TODO: Check why this set_datadir is needed set_datadir(pathlib.Path(__file__).parent / "data" / "quantify") def backup_config_file(): import os import shutil import errno from datetime import datetime original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml')) now = datetime.now() now = now.strftime("%d%m%Y%H%M%S") destination_file_name = "tiiq_" + now + ".yml" target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name)) try: print("Copying file: " + original) print("Destination file" + target) shutil.copyfile(original, target) print("Platform settings backup done") except IOError as e: # ENOENT(2): file does not exist, raised also on missing dest parent dir if e.errno != errno.ENOENT: raise # try creating parent directories os.makedirs(os.path.dirname(target)) shutil.copy(original, target) def get_config_parameter(dictID, dictID1, key): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml')) with open(calibration_path) as file: settings = yaml.safe_load(file) file.close() if (not dictID1): return settings[dictID][key] else: return settings[dictID][dictID1][key] def save_config_parameter(dictID, dictID1, key, value): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml')) with open(calibration_path, "r") as file: settings = yaml.safe_load(file) file.close() if (not dictID1): settings[dictID][key] = value print("Saved value: " + str(settings[dictID][key])) else: settings[dictID][dictID1][key] = value print("Saved value: " + str(settings[dictID][dictID1][key])) with open(calibration_path, "w") as file: settings = yaml.dump(settings, file, 
sort_keys=False, indent=4) file.close() def plot(smooth_dataset, dataset, label, type): if (type == 0): #cavity plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return if (type == 1): #qubit spec, rabi, ramsey, t1 plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return def create_measurement_control(name): import os if os.environ.get("ENABLE_PLOTMON", True): mc = MeasurementControl(f'MC {name}') from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}') plotmon.tuids_max_num(3) mc.instr_plotmon(plotmon.name) from quantify_core.visualization.instrument_monitor import InstrumentMonitor insmon = InstrumentMonitor(f"Instruments Monitor {name}") mc.instrument_monitor(insmon.name) return mc, plotmon, insmon else: mc = MeasurementControl(f'MC {name}') return mc, None, None class ROController(): # Quantify Gettable Interface Implementation label = ['Amplitude', 'Phase','I','Q'] unit = ['V', 'Radians','V','V'] name = ['A', 'Phi','I','Q'] def __init__(self, platform, sequence): self.platform = platform self.sequence = sequence def get(self): return self.platform.execute(self.sequence) def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step): #[. . . . . .][...................]0[...................][. . . . . .] #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------] #>. 
.< lowres_step # >..< highres_step # ^ centre value = 0 scanrange = np.concatenate( ( np.arange(-lowres_width,-highres_width,lowres_step), np.arange(-highres_width,highres_width,highres_step), np.arange(highres_width,lowres_width,lowres_step) ) ) return scanrange def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step): #Fast Sweep platform.software_averages = 1 scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages) platform.stop() platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values]) avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6 # Precision Sweep platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency max_ro_voltage = smooth_dataset.max() * 1e6 print(f"\nResonator Frequency = {resonator_freq}") return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) scanrange = scanrange + platform.LO_qrm.get_frequency() mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))]) setpoints_gain = np.arange(10, 100, 10) mc.setpoints_grid([scanrange, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Punchout", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) #FIXME: Code Lorentzian fitting for cavity spec and punchout resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency print(f"\nResonator Frequency = {resonator_freq}") print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}") return resonator_freq, smooth_dataset, dataset def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.software_averages = 1 # Fast Sweep fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step) mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages) platform.stop() # Precision Sweep platform.software_averages = 1 precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step) 
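    # The finer scan re-uses the current qubit LO frequency as its centre; after
    # the run the trace is smoothed with a Savitzky-Golay filter and the qubit
    # frequency is read off at the minimum of the readout voltage.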
mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2) qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency min_ro_voltage = smooth_dataset.min() * 1e6 return qubit_freq, min_ro_voltage, smooth_dataset, dataset def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency) platform.software_averages = 3 mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse))) mc.setpoints(np.arange(1, 400, 1)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages) platform.stop() return dataset, platform.qcm.gain def run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): #qubit pulse duration=200 platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 3 mc.settables(Settable(QCPulseGainParameter(platform.qcm))) mc.setpoints(np.arange(0, 100, 10)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages) platform.stop() return dataset def run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseGainParameter(platform.qcm))]) setpoints_length = np.arange(1, 400, 10) setpoints_gain = np.arange(0, 20, 1) mc.setpoints_grid([setpoints_length, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseAmplitudeParameter(qc_pulse))]) setpoints_length = np.arange(1, 1000, 2) setpoints_amplitude = np.arange(0, 100, 2) mc.setpoints_grid([setpoints_length, setpoints_amplitude]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset def run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, 
pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse))) mc.setpoints(np.arange(delay_before_readout_start, delay_before_readout_end, delay_before_readout_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('T1', soft_avg = platform.software_averages) platform.stop() return dataset def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration))) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Ramsey', soft_avg = platform.software_averages) platform.stop() return dataset def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length))) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo', soft_avg = platform.software_averages) platform.stop() return dataset # Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length)) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages) platform.stop() return dataset def run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) # Fast Sweep platform.software_averages = 1 scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Fast", soft_avg=platform.software_averages) platform.stop() shifted_LO_frequency = 
dataset['x0'].values[dataset['y0'].argmax().values] # Precision Sweep platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + shifted_LO_frequency) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency shifted_max_ro_voltage = smooth_dataset.max() * 1e6 print('\n') print(f"\nResonator Frequency = {shifted_frequency}") print(f"Maximum Voltage Measured = {shifted_max_ro_voltage} μV") return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset # help classes class QCPulseLengthParameter(): label = 'Qubit Control Pulse Length' unit = 'ns' name = 'qc_pulse_length' def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.duration = value self.ro_pulse.start = value + 4 class QCPulseGainParameter(): label = 'Qubit Control Gain' unit = '%' name = 'qc_pulse_gain' def __init__(self, qcm): self.qcm = qcm def set(self,value): self.qcm.gain = value / 100 class QCPulseAmplitudeParameter(): label = 'Qubit Control Pulse Amplitude' unit = '%' name = 'qc_pulse_amplitude' def __init__(self, qc_pulse): self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.amplitude = value / 100 class T1WaitParameter(): label = 'Time' unit = 'ns' name = 't1_wait' initial_value = 0 def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.base_duration = qc_pulse.duration def set(self, value): # TODO: implement following condition #must be >= 4ns <= 65535 #platform.delay_before_readout = value self.ro_pulse.start = self.base_duration + 4 + value class RamseyWaitParameter(): label = 'Time' unit = 'ns' name = 'ramsey_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length // 2 + value self.ro_pulse.start = self.pi_pulse_length + value + 4 class SpinEchoWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length//2 + value self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4 class SpinEcho3PWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.qc3_pulse = qc3_pulse self.pi_pulse_length = pi_pulse_length def set(self,value): self.qc2_pulse.start = self.pi_pulse_length//2 + value self.qc3_pulse.start = (3 * self.pi_pulse_length)//2 + 2 * value self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4 class QRPulseGainParameter(): label = 'Qubit Readout Gain' unit = '%' name = 'ro_pulse_gain' def __init__(self, qrm): self.qrm = qrm def set(self,value): self.qrm.gain = value / 100 setup.py METASEP # Installation script for python from setuptools import setup, find_packages import os import re PACKAGE = "qibolab" # Returns the version def get_version(): 
""" Gets the version from the package's __init__ file if there is some problem, let it happily fail """ VERSIONFILE = os.path.join("src", PACKAGE, "__init__.py") initfile_lines = open(VERSIONFILE, "rt").readlines() VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" for line in initfile_lines: mo = re.search(VSRE, line, re.M) if mo: return mo.group(1) # load long description from README this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f: long_description = f.read() setup( name=PACKAGE, version=get_version(), description="Quantum hardware backend for Qibo", author="The Qibo team", author_email="", url="https://github.com/qiboteam/qibolab", packages=find_packages("src"), package_dir={"": "src"}, package_data={"": ["*.json", "*.npy"]}, zip_safe=False, classifiers=[ "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Physics", ], install_requires=[ "qibo", "visa", "pyvisa-py", "qcodes", ], extras_require={ "docs": [ "sphinx", "sphinx_rtd_theme", "recommonmark", "sphinxcontrib-bibtex", "sphinx_markdown_tables", "nbsphinx", "IPython"], # TII system dependencies "tiiq": [ "qblox-instruments==0.5.4", "qcodes==0.29.1", "lmfit", "quantify-core==0.5.1", "pyVISA==1.11.3", "pyVISA-py==0.5.2", ] }, python_requires=">=3.6.0", long_description=long_description, long_description_content_type='text/markdown', ) examples/qili_single_qubit/diagnostics.py METASEP
platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return 
settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, 
dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, 
precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, 
ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n 
file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import 
set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n 
mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return 
resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef 
run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n 
settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef 
backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 
def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    #[. . . . . .][...................]0[...................][. . . . . .]
    #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]
    #>. .< lowres_step
    #                >..< highres_step
    #                                  ^ centre value = 0
    scanrange = np.concatenate(
        (   np.arange(-lowres_width, -highres_width, lowres_step),
            np.arange(-highres_width, highres_width, highres_step),
            np.arange(highres_width, lowres_width, lowres_step)
        )
    )
    return scanrange
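# Worked example for variable_resolution_scanrange above (hand-checked values):
# a coarse grid on the wings and a fine grid around the centre value 0.
def example_scanrange_values():
    demo = variable_resolution_scanrange(lowres_width=10, lowres_step=2,
                                         highres_width=4, highres_step=1)
    # demo is the concatenation of arange(-10, -4, 2), arange(-4, 4, 1)
    # and arange(4, 10, 2):
    # [-10, -8, -6, -4, -3, -2, -1, 0, 1, 2, 3, 4, 6, 8]
    return demo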
def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
                               lowres_width, lowres_step, highres_width, highres_step,
                               precision_width, precision_step):
    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()
    platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])
    avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    max_ro_voltage = smooth_dataset.max() * 1e6
    print(f"\nResonator Frequency = {resonator_freq}")
    return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset

def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step):
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    scanrange = scanrange + platform.LO_qrm.get_frequency()

    mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])
    setpoints_gain = np.arange(10, 100, 10)
    mc.setpoints_grid([scanrange, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Punchout", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    #FIXME: Code Lorentzian fitting for cavity spec and punchout
    resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values] + ro_pulse.frequency
    print(f"\nResonator Frequency = {resonator_freq}")
    print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}")

    return resonator_freq, smooth_dataset, dataset

def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
                           fast_start, fast_end, fast_step,
                           precision_start, precision_end, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.software_averages = 1
    # Fast Sweep
    fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()

    # Precision Sweep
    platform.software_averages = 1
    precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)
    qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency
    min_ro_voltage = smooth_dataset.min() * 1e6

    return qubit_freq, min_ro_voltage, smooth_dataset, dataset
def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(1, 400, 1))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)
    platform.stop()

    return dataset, platform.qcm.gain

def run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    #qubit pulse duration=200
    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseGainParameter(platform.qcm)))
    mc.setpoints(np.arange(0, 100, 10))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseGainParameter(platform.qcm))])
    setpoints_length = np.arange(1, 400, 10)
    setpoints_gain = np.arange(0, 20, 1)
    mc.setpoints_grid([setpoints_length, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset

def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseAmplitudeParameter(qc_pulse))])
    setpoints_length = np.arange(1, 1000, 2)
    setpoints_amplitude = np.arange(0, 100, 2)
    mc.setpoints_grid([setpoints_length, setpoints_amplitude])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset
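# The QCPulseLengthParameter, QCPulseGainParameter and QCPulseAmplitudeParameter
# classes wrapped in Settable() above are not part of this excerpt. For
# illustration only: a quantify-core Settable needs `name`, `label` and `unit`
# attributes plus a `set()` method. A hypothetical pulse-length parameter could
# look roughly like this (the pulse attribute names are assumptions, not the
# project's actual implementation):
class ExamplePulseLengthParameter():
    label = 'Drive pulse length'
    unit = 'ns'
    name = 'pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        # stretch the drive pulse and start the readout right after it
        # (hypothetical attribute names on the pulse objects)
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4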
def run_t1(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
           pi_pulse_gain, pi_pulse_duration, delay_before_readout_start,
           delay_before_readout_end, delay_before_readout_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(delay_before_readout_start,
                           delay_before_readout_end,
                           delay_before_readout_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('T1', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
               pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Ramsey', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
                  pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                  start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout
def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,
                          pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                          start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
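# A minimal end-to-end sketch of how the routines above could be chained
# (an illustration, not the project's actual calibration script): the caller
# is assumed to provide an initialised qibolab platform plus the pulse
# sequence and readout pulse, and the sweep widths/steps below are
# placeholder values, not calibrated ones.
def example_resonator_characterization(platform, sequence, ro_pulse):
    mc, plotmon, insmon = create_measurement_control('resonator_spec')
    resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = \
        run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
                                   lowres_width=30e6, lowres_step=1e6,
                                   highres_width=5e6, highres_step=0.2e6,
                                   precision_width=2e6, precision_step=0.1e6)
    plot(smooth_dataset, dataset, "resonator_spectroscopy", 0)
    return resonator_freq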
os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n 
self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n 
platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = 
now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + 
str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import 
savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC 
{name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + 
str(settings[dictID][dictID1][key]))\n\n    with open(calibration_path, \"w\") as file:\n        settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n        file.close()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\n
def backup_config_file():\n    import os\n    import shutil\n    import errno\n    from datetime import datetime\n    original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n    now = datetime.now()\n    now = now.strftime(\"%d%m%Y%H%M%S\")\n    destination_file_name = \"tiiq_\" + now + \".yml\" \n    target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n    try:\n        print(\"Copying file: \" + original)\n        print(\"Destination file\" + target)\n        shutil.copyfile(original, target)\n        print(\"Platform settings backup done\")\n    except IOError as e:\n        # ENOENT(2): file does not exist, raised also on missing dest parent dir\n        if e.errno != errno.ENOENT:\n            raise\n        # try creating parent directories\n        os.makedirs(os.path.dirname(target))\n        shutil.copy(original, target)\n\n
def get_config_parameter(dictID, dictID1, key):\n    import os\n    calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n    with open(calibration_path) as file:\n        settings = yaml.safe_load(file)\n        file.close() \n\n    if (not dictID1):\n        return settings[dictID][key]\n    else:\n        return settings[dictID][dictID1][key]\n\n
def save_config_parameter(dictID, dictID1, key, value):\n    import os\n    calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n    with open(calibration_path, \"r\") as file:\n        settings = yaml.safe_load(file)\n        file.close()\n    \n    if (not dictID1):\n        settings[dictID][key] = value\n        print(\"Saved 
value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n 
plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n        return", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is 
needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # 
Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, 
sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], 
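The two runcard helpers above implement a simple one- or two-level lookup and update on a YAML mapping. The following is a minimal, self-contained sketch of that pattern; the file name and keys are invented for illustration and are not the real tiiq.yml sections.

# Hypothetical round trip demonstrating the same nested YAML read/update pattern.
import yaml

example = {"resonator_spectroscopy": {"precision": {"precision_width": 2_000_000}}}
with open("example_runcard.yml", "w") as f:          # throwaway file, not the real runcard
    yaml.dump(example, f, sort_keys=False, indent=4)

with open("example_runcard.yml") as f:
    loaded = yaml.safe_load(f)
print(loaded["resonator_spectroscopy"]["precision"]["precision_width"])   # 2000000

loaded["resonator_spectroscopy"]["precision"]["precision_width"] = 1_000_000   # update one leaf
with open("example_runcard.yml", "w") as f:
    yaml.dump(loaded, f, sort_keys=False, indent=4)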
def plot(smooth_dataset, dataset, label, type):
    if type == 0:  # cavity plots: mark the maximum of the smoothed trace
        fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))
        ax.plot(dataset['x0'].values, dataset['y0'].values, '-', color='C0')
        ax.plot(dataset['x0'].values, smooth_dataset, '-', color='C1')
        ax.title.set_text(label)
        ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')
        plt.savefig(pathlib.Path("data") / f"{label}.pdf")
        return

    if type == 1:  # qubit spec, rabi, ramsey, t1 plots: mark the minimum of the smoothed trace
        fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))
        ax.plot(dataset['x0'].values, dataset['y0'].values, '-', color='C0')
        ax.plot(dataset['x0'].values, smooth_dataset, '-', color='C1')
        ax.title.set_text(label)
        ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')
        plt.savefig(pathlib.Path("data") / f"{label}.pdf")
        return


def create_measurement_control(name):
    import os
    # NOTE: environment variables are strings, so any non-empty ENABLE_PLOTMON value
    # (including "0") keeps the plot monitor enabled; unset defaults to enabled.
    if os.environ.get("ENABLE_PLOTMON", True):
        mc = MeasurementControl(f'MC {name}')
        from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt
        plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')
        plotmon.tuids_max_num(3)
        mc.instr_plotmon(plotmon.name)
        from quantify_core.visualization.instrument_monitor import InstrumentMonitor
        insmon = InstrumentMonitor(f"Instruments Monitor {name}")
        mc.instrument_monitor(insmon.name)
        return mc, plotmon, insmon
    else:
        mc = MeasurementControl(f'MC {name}')
        return mc, None, None


class ROController():
    # Quantify Gettable interface implementation: executes the pulse sequence on the
    # platform and returns (amplitude, phase, I, Q) of the acquired readout signal.
    label = ['Amplitude', 'Phase', 'I', 'Q']
    unit = ['V', 'Radians', 'V', 'V']
    name = ['A', 'Phi', 'I', 'Q']

    def __init__(self, platform, sequence):
        self.platform = platform
        self.sequence = sequence

    def get(self):
        return self.platform.execute(self.sequence)


def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    # [. . . . . .][...................]0[...................][. . . . . .]
    # [------ lowres_width ------][-- highres_width --] [-- highres_width --][------ lowres_width ------]
    # coarse steps (lowres_step) on the wings, fine steps (highres_step) around the centre value 0
    scanrange = np.concatenate(
        (np.arange(-lowres_width, -highres_width, lowres_step),
         np.arange(-highres_width, highres_width, highres_step),
         np.arange(highres_width, lowres_width, lowres_step))
    )
    return scanrange
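For concreteness, here is a worked call of the helper above, assuming the function definition is in scope; the widths and steps (in Hz) are arbitrary illustrative numbers.

# Worked example (illustrative numbers only): 10 MHz coarse wings in 2 MHz steps
# around a 4 MHz fine core in 1 MHz steps.
offsets = variable_resolution_scanrange(10e6, 2e6, 4e6, 1e6)
print(offsets / 1e6)
# offsets in MHz: -10, -8, -6, -4, -3, -2, -1, 0, 1, 2, 3, 4, 6, 8
# In the sweeps below these offsets are shifted by the current LO frequency before mc.setpoints().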
def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
                               lowres_width, lowres_step, highres_width, highres_step,
                               precision_width, precision_step):
    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()
    platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])
    avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    max_ro_voltage = smooth_dataset.max() * 1e6
    print(f"\nResonator Frequency = {resonator_freq}")
    return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset


def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step):
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    scanrange = scanrange + platform.LO_qrm.get_frequency()

    mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])
    setpoints_gain = np.arange(10, 100, 10)
    mc.setpoints_grid([scanrange, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Punchout", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    # FIXME: Code Lorentzian fitting for cavity spec and punchout
    resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values] + ro_pulse.frequency
    print(f"\nResonator Frequency = {resonator_freq}")
    print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}")

    return resonator_freq, smooth_dataset, dataset


def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
                           fast_start, fast_end, fast_step,
                           precision_start, precision_end, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.software_averages = 1
    # Fast Sweep
    fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()

    # Precision Sweep
    platform.software_averages = 1
    precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)
    qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency
    min_ro_voltage = smooth_dataset.min() * 1e6

    return qubit_freq, min_ro_voltage, smooth_dataset, dataset


def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
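These runners assume an already-initialised qibolab platform, a readout pulse sequence, and the MeasurementControl created above; none of that setup appears in this excerpt. The sketch below is only a hedged illustration of how the pieces are typically wired together: `platform`, `sequence`, `ro_pulse`, the sweep limits, and the runcard keys passed to save_config_parameter are all placeholders, not names taken from this file.

# Hedged wiring sketch: `platform`, `sequence` and `ro_pulse` are placeholders assumed to be
# built by the surrounding calibration script; sweep limits and runcard keys are illustrative.
backup_config_file()
mc, plotmon, insmon = create_measurement_control("resonator_spectroscopy")

resonator_freq, avg_min_voltage, max_ro_voltage, smooth, data = run_resonator_spectroscopy(
    platform, mc, sequence, ro_pulse,
    lowres_width=30e6, lowres_step=1e6,
    highres_width=1e6, highres_step=0.1e6,
    precision_width=0.5e6, precision_step=0.02e6,
)

plot(smooth, data, label="resonator_spectroscopy", type=0)   # cavity-style plot (argmax marker)
save_config_parameter("settings", "", "resonator_freq", int(resonator_freq))   # keys are placeholders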
exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
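backup_config_file above copies the runcard to a timestamp-suffixed file and, on the first run, creates the destination directory before retrying. A compact sketch of the same pattern with placeholder paths (the real tiiq.yml location and backup directory come from the repository, not from this example); os.makedirs(..., exist_ok=True) replaces the ENOENT retry:

import os
import shutil
from datetime import datetime

def backup_file(original, backup_dir):
    stamp = datetime.now().strftime("%d%m%Y%H%M%S")           # e.g. tiiq_01012024120000.yml
    base, ext = os.path.splitext(os.path.basename(original))
    target = os.path.join(backup_dir, f"{base}_{stamp}{ext}")
    os.makedirs(backup_dir, exist_ok=True)                     # create the parent dir up front
    shutil.copyfile(original, target)
    return target

# backup_file("tiiq.yml", "data/settings_backups")             # placeholder paths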
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
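ROController above implements Quantify's gettable duck-typing: parallel label/unit/name lists describing the returned values plus a get() method that runs the pulse sequence. A stripped-down sketch with a stub platform so the interface can be exercised without hardware; the stub and its fake I/Q values are purely illustrative:

import numpy as np

class StubPlatform:
    """Stands in for the lab platform; returns fake (amplitude, phase, i, q)."""
    def execute(self, sequence):
        i, q = np.random.default_rng(1).normal(size=2)
        return np.hypot(i, q), np.arctan2(q, i), i, q

class ROController:
    # Quantify gettable interface: one entry per value returned by get().
    label = ['Amplitude', 'Phase', 'I', 'Q']
    unit = ['V', 'Radians', 'V', 'V']
    name = ['A', 'Phi', 'I', 'Q']

    def __init__(self, platform, sequence):
        self.platform = platform
        self.sequence = sequence

    def get(self):
        return self.platform.execute(self.sequence)

ro = ROController(StubPlatform(), sequence=None)
print(ro.get())   # in the real flow this object is wrapped as Gettable(ro) and handed to mc.gettables(...)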
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 
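run_rabi_pulse_length sweeps a QCPulseLengthParameter settable that is defined elsewhere in the repository and is not shown in this excerpt. The following is therefore only a hypothetical sketch of what such a Quantify-style settable could look like; the attribute names (duration, start) and the readout-shift logic are assumptions, not the repository's actual implementation:

class QCPulseLengthParameter:
    """Hypothetical settable: adjusts the qubit-drive pulse length for a Rabi sweep."""
    label = 'Qubit control pulse length'
    unit = 'ns'
    name = 'qc_pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        # Assumed behaviour: lengthen the drive pulse and start the readout right after it.
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4   # illustrative padding only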
'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
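run_punchout above sweeps LO frequency against readout pulse gain by passing two settables and two setpoint axes to mc.setpoints_grid, which measures on their Cartesian product. The explicit grid below is just a numpy sanity check of the resulting acquisition count; the frequency axis is illustrative, the gain axis matches the np.arange(10, 100, 10) used above:

import numpy as np

freq_setpoints = np.linspace(7.79e9, 7.81e9, 101)   # illustrative LO frequencies
gain_setpoints = np.arange(10, 100, 10)              # same gain axis as run_punchout

# The full grid has len(freq_setpoints) * len(gain_setpoints) measurement points.
grid = np.array(np.meshgrid(freq_setpoints, gain_setpoints, indexing='ij')).reshape(2, -1).T
print(grid.shape)   # (909, 2) -> 101 frequencies x 9 gain values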
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = 
yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
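A note on create_measurement_control above: os.environ.get returns a string whenever the variable is set, so any value of ENABLE_PLOTMON, including "0" or "False", is truthy and takes the plot-monitor branch. A more explicit toggle could look like the helper below; this is an illustrative suggestion, not code from the repository:

import os

def plotmon_enabled(default=True):
    raw = os.environ.get("ENABLE_PLOTMON")
    if raw is None:
        return default
    return raw.strip().lower() not in ("0", "false", "no", "off")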
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + 
str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n 
calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
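The plot helper in the module overlays the raw trace, its Savitzky-Golay smoothing, and a marker at the detected extremum, then writes a PDF under data/. A minimal matplotlib sketch of the same figure for arbitrary x/y arrays; the output directory handling and figure proportions are illustrative:

import pathlib
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter

def plot_trace(x, y, label, find="max", outdir="data"):
    smooth = savgol_filter(y, 25, 2)                       # requires len(y) >= 25
    idx = smooth.argmax() if find == "max" else smooth.argmin()
    fig, ax = plt.subplots(figsize=(15, 15 / 2 / 1.61))
    ax.plot(x, y, '-', color='C0', label='raw')
    ax.plot(x, smooth, '-', color='C1', label='smoothed')
    ax.plot(x[idx], smooth[idx], 'o', color='C2', label='extremum')
    ax.set_title(label)
    ax.legend()
    out = pathlib.Path(outdir)
    out.mkdir(parents=True, exist_ok=True)
    fig.savefig(out / f"{label}.pdf")
    plt.close(fig)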
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 
'..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return 
settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
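# --- Illustrative sketch (added, not part of the dataset record above) ----------------
# The spectroscopy routines above post-process each sweep with scipy.signal.savgol_filter
# and then read the resonator / qubit frequency off the argmax / argmin of the smoothed
# trace.  This self-contained example reproduces that analysis step on synthetic data;
# the Lorentzian line shape, centre frequency and noise level are assumptions made only
# for illustration.
import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
freqs = np.linspace(7.795e9, 7.805e9, 401)                     # synthetic sweep axis [Hz]
f0, kappa = 7.8e9, 0.5e6                                       # assumed resonance and linewidth
signal = 1.0 / (1.0 + ((freqs - f0) / (kappa / 2)) ** 2)       # Lorentzian peak
noisy = signal + 0.05 * rng.standard_normal(freqs.size)

# Same smoothing call as the script: 25-sample window, polynomial order 2.
smooth = savgol_filter(noisy, 25, 2)

# Frequency estimate = sweep value at the extremum of the smoothed trace.
estimated_f0 = freqs[smooth.argmax()]
print(f"estimated resonance: {estimated_f0:.0f} Hz (true: {f0:.0f} Hz)")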
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # 
try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + 
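# --- Illustrative sketch (added, not part of the dataset record above) ----------------
# QCPulseLengthParameter and QCPulseGainParameter are swept above via Settable(...) but
# are not defined anywhere in this excerpt.  Judging from the gettable that *is* shown
# (ROController exposes name/label/unit plus get()), they are plain classes exposing the
# attributes quantify-core expects from a settable: name, label, unit and set(value).
# The class below is a hypothetical reconstruction of that pattern for the pulse-length
# sweep; the attribute values and the readout-start bookkeeping are assumptions.
from types import SimpleNamespace

class PulseLengthParameter:
    """Duck-typed quantify-core settable that sweeps the drive-pulse duration."""
    label = "Pulse length"
    unit = "ns"
    name = "pulse_length"

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        # Stretch the qubit-control pulse and push the readout pulse back so it still
        # starts after the drive pulse ends (assumed convention, 4 ns buffer).
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4

# Minimal stand-ins for the real pulse objects, just to show the duck-typing in action.
qc_pulse_demo = SimpleNamespace(duration=60)
ro_pulse_demo = SimpleNamespace(start=64)
sweep = PulseLengthParameter(ro_pulse_demo, qc_pulse_demo)
sweep.set(120)
print(qc_pulse_demo.duration, ro_pulse_demo.start)   # -> 120 124
# Usage in the script would then be: mc.settables(Settable(PulseLengthParameter(ro_pulse, qc_pulse)))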
original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . 
.]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, 
sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n 
mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, 
dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, 
precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, 
ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = 
pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n 
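# --- Illustrative sketch (added, not part of the dataset record above) ----------------
# Throughout the record the local-oscillator setting is derived from a target frequency
# and the pulse's intermediate frequency (IF), e.g. LO_qrm = resonator_freq - ro_pulse.frequency,
# which is the usual upper-sideband bookkeeping RF = LO + IF.  Note the drive side is not
# written consistently: run_rabi_pulse_length uses qubit_freq - qc_pulse.frequency while the
# other routines use qubit_freq + qc_pulse.frequency; which sign is correct depends on the
# mixer sideband in use and is worth checking against the hardware setup.  The numbers
# below are assumptions chosen only to illustrate the arithmetic.
resonator_freq = 7.80e9          # assumed target readout frequency [Hz]
ro_if = 20e6                     # assumed readout-pulse IF [Hz]
lo_qrm = resonator_freq - ro_if  # LO setting as computed in the script
assert lo_qrm + ro_if == resonator_freq   # upper sideband lands back on the target
print(f"LO_qrm = {lo_qrm/1e9:.3f} GHz, IF = {ro_if/1e6:.0f} MHz -> RF = {resonator_freq/1e9:.3f} GHz")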
ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseAmplitudeParameter(qc_pulse))])
    setpoints_length = np.arange(1, 1000, 2)
    setpoints_amplitude = np.arange(0, 100, 2)
    mc.setpoints_grid([setpoints_length, setpoints_amplitude])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Amplitude', soft_avg=platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude,
    # determine corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset

def run_t1(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
           pi_pulse_gain, pi_pulse_duration, delay_before_readout_start,
           delay_before_readout_end, delay_before_readout_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(delay_before_readout_start,
                           delay_before_readout_end,
                           delay_before_readout_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('T1', soft_avg=platform.software_averages)
    platform.stop()

    return dataset
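# The routines above are meant to be chained: resonator spectroscopy fixes the
# readout LO, qubit spectroscopy fixes the drive LO, the Rabi scans calibrate the
# pi pulse, and T1 reuses that calibration. The driver below is an illustrative
# sketch only: the sweep ranges are placeholders (real values come from the
# platform runcard) and each experiment would normally be built with its own
# pulse sequence rather than reusing a single `sequence` object.
def _example_calibration_flow(platform, mc, sequence, qc_pulse, ro_pulse):
    resonator_freq, _, _, _, _ = run_resonator_spectroscopy(
        platform, mc, sequence, ro_pulse,
        lowres_width=30e6, lowres_step=1e6,
        highres_width=1e6, highres_step=0.1e6,
        precision_width=0.5e6, precision_step=0.02e6)
    qubit_freq, _, _, _ = run_qubit_spectroscopy(
        platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
        fast_start=-30e6, fast_end=30e6, fast_step=1e6,
        precision_start=-2e6, precision_end=2e6, precision_step=0.1e6)
    rabi_dataset, pi_pulse_gain = run_rabi_pulse_length(
        platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse)
    t1_dataset = run_t1(
        platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
        pi_pulse_gain, pi_pulse_duration=60,
        delay_before_readout_start=4, delay_before_readout_end=4000,
        delay_before_readout_step=40)
    return resonator_freq, qubit_freq, rabi_dataset, t1_dataset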
def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
               pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Ramsey', soft_avg=platform.software_averages)
    platform.stop()

    return dataset

def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
                  pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                  start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo', soft_avg=platform.software_averages)
    platform.stop()

    return dataset
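# run_ramsey and run_spin_echo return the raw dataset without any analysis in
# this excerpt. A common follow-up is to fit the Ramsey fringes to a damped
# cosine to estimate the detuning and T2*; the sketch below is illustrative
# only (function name, model and initial guesses are assumptions).
def fit_ramsey_fringes(wait_times, voltages):
    from scipy.optimize import curve_fit

    def damped_cosine(t, offset, amplitude, t2_star, detuning, phase):
        # offset + A * exp(-t/T2*) * cos(2*pi*detuning*t + phase)
        return offset + amplitude * np.exp(-t / t2_star) * np.cos(2 * np.pi * detuning * t + phase)

    offset0 = np.mean(voltages)
    amplitude0 = (np.max(voltages) - np.min(voltages)) / 2
    t2_star0 = (wait_times[-1] - wait_times[0]) / 2
    detuning0 = 1.0 / (wait_times[-1] - wait_times[0])
    popt, _ = curve_fit(damped_cosine, wait_times, voltages,
                        p0=[offset0, amplitude0, t2_star0, detuning0, 0.0])
    return popt  # offset, amplitude, T2*, detuning, phase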
# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout
def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 
'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . 
.]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, 
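The variable-resolution sweep above concatenates coarse outer ranges with a fine window around zero detuning. A small usage sketch, assuming only numpy; the widths, steps and the 7.8 GHz LO value are example numbers, not the platform's settings:

import numpy as np

def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    # Coarse outer wings, fine inner window, centred on zero detuning.
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),
        np.arange(-highres_width, highres_width, highres_step),
        np.arange(highres_width, lowres_width, lowres_step),
    ))

detunings = variable_resolution_scanrange(30e6, 1e6, 5e6, 0.1e6)
lo_frequency = 7.8e9                  # example LO setting in Hz
setpoints = detunings + lo_frequency  # absolute frequencies handed to mc.setpoints()
print(len(setpoints), setpoints.min(), setpoints.max())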
sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n 
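Both spectroscopy routines above smooth the readout trace with a Savitzky-Golay filter and read the extreme point off the smoothed curve (a peak for the resonator, a dip for the qubit). A sketch of that extraction on plain numpy arrays, with example IF offsets standing in for ro_pulse.frequency and qc_pulse.frequency:

import numpy as np
from scipy.signal import savgol_filter

def extract_resonator_freq(lo_freqs, voltages, ro_if=20e6, window=25, order=2):
    # Smooth the magnitude trace, then take the maximum as the resonator peak.
    smooth = savgol_filter(voltages, window, order)
    return lo_freqs[smooth.argmax()] + ro_if, smooth

def extract_qubit_freq(lo_freqs, voltages, qc_if=200e6, window=11, order=2):
    # Qubit spectroscopy appears as a dip in readout voltage, hence argmin.
    smooth = savgol_filter(voltages, window, order)
    return lo_freqs[smooth.argmin()] - qc_if, smooth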
mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup 
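The sweeps above hand objects such as T1WaitParameter and RamseyWaitParameter to Settable(); their definitions are not shown here. As a rough guess at their shape, a Quantify settable only needs name/label/unit attributes and a set() method, so a delay sweep can be a small class that repositions the readout pulse (the pulse attributes and the 4 ns buffer below are assumptions for illustration):

class DelayBeforeReadout:
    # Hypothetical stand-in for the wait-time parameters referenced above.
    label = 'Time'
    unit = 'ns'
    name = 'delay_before_readout'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        # Keep the readout a fixed buffer after the qubit pulse, plus the swept delay.
        self.ro_pulse.start = self.qc_pulse.duration + 4 + value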
done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is 
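The three-pulse echo above is commented as RX(pi/2) - wait - RX(pi) - wait - RX(pi/2) - readout. A worked timing sketch, assuming the pi/2 pulses last half a pi pulse and the readout starts 4 ns after the last pulse:

def spin_echo_3p_starts(pi_pulse_length, wait):
    half = pi_pulse_length // 2             # RX(pi/2) assumed half the pi-pulse length
    t_first = 0
    t_pi = half + wait                      # RX(pi) after the first free-evolution time
    t_second = t_pi + pi_pulse_length + wait
    t_readout = t_second + half + 4         # 4 ns buffer is an example value
    return t_first, t_pi, t_second, t_readout

print(spin_echo_3p_starts(pi_pulse_length=40, wait=100))
# -> (0, 120, 260, 284)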
needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # 
Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, 
sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, 
ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n 
mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, 
dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, 
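The shifted resonator sweep above repeats the cavity scan while the qubit is driven; comparing its peak with the bare resonator peak is one way to estimate the dispersive shift. A sketch of that comparison, assuming plain numpy arrays and the same Savitzky-Golay smoothing used above:

import numpy as np
from scipy.signal import savgol_filter

def dispersive_shift(freqs, bare_trace, driven_trace, window=25, order=2):
    # Peak of each smoothed trace; both traces are assumed to share the same frequency axis.
    bare_peak = freqs[savgol_filter(bare_trace, window, order).argmax()]
    driven_peak = freqs[savgol_filter(driven_trace, window, order).argmax()]
    return driven_peak - bare_peak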
smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = 
os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if 
(not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = 
yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 

def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
                               lowres_width, lowres_step, highres_width, highres_step,
                               precision_width, precision_step):
    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()
    platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])
    avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    max_ro_voltage = smooth_dataset.max() * 1e6
    print(f"\nResonator Frequency = {resonator_freq}")
    return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset


def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step):
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    scanrange = scanrange + platform.LO_qrm.get_frequency()

    mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])
    setpoints_gain = np.arange(10, 100, 10)
    mc.setpoints_grid([scanrange, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Punchout", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    # FIXME: Code Lorentzian fitting for cavity spec and punchout
    resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values] + ro_pulse.frequency
    print(f"\nResonator Frequency = {resonator_freq}")
    print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}")

    return resonator_freq, smooth_dataset, dataset


def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
                           fast_start, fast_end, fast_step,
                           precision_start, precision_end, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.software_averages = 1
    # Fast Sweep
    fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()

    # Precision Sweep
    platform.software_averages = 1
    precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)
    qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency
    min_ro_voltage = smooth_dataset.min() * 1e6

    return qubit_freq, min_ro_voltage, smooth_dataset, dataset


def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(1, 400, 1))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length', soft_avg=platform.software_averages)
    platform.stop()

    return dataset, platform.qcm.gain


def run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    # qubit pulse duration = 200
    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseGainParameter(platform.qcm)))
    mc.setpoints(np.arange(0, 100, 10))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Gain', soft_avg=platform.software_averages)
    platform.stop()

    return dataset


def run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseGainParameter(platform.qcm))])
    setpoints_length = np.arange(1, 400, 10)
    setpoints_gain = np.arange(0, 20, 1)
    mc.setpoints_grid([setpoints_length, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Gain', soft_avg=platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders an
    # off-resonance amplitude, then determine the corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset


def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseAmplitudeParameter(qc_pulse))])
    setpoints_length = np.arange(1, 1000, 2)
    setpoints_amplitude = np.arange(0, 100, 2)
    mc.setpoints_grid([setpoints_length, setpoints_amplitude])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Amplitude', soft_avg=platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders an
    # off-resonance amplitude, then determine the corresponding pi_pulse amplitude
    # platform.pi_pulse_length =
    # platform.pi_pulse_amplitude =
    platform.stop()

    return dataset


def run_t1(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
           pi_pulse_gain, pi_pulse_duration, delay_before_readout_start,
           delay_before_readout_end, delay_before_readout_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(delay_before_readout_start,
                           delay_before_readout_end,
                           delay_before_readout_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('T1', soft_avg=platform.software_averages)
    platform.stop()

    return dataset


def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
               pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Ramsey', soft_avg=platform.software_averages)
    platform.stop()

    return dataset


def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
                  pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                  start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo', soft_avg=platform.software_averages)
    platform.stop()

    return dataset


# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout
def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,
                          pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                          start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(Settable(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo 3 Pulses', soft_avg=platform.software_averages)
    platform.stop()

    return dataset
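
# Hedged usage sketch (editor-added, not part of the original calibration code):
# the routines above are typically chained into a single-qubit bring-up. The
# `platform`, `mc`, `sequence`, `qc_pulse` and `ro_pulse` objects are assumed to
# be built elsewhere (e.g. from the tiiq runcard), with `sequence` already
# containing the pulses each routine needs; the sweep widths below are
# illustrative placeholders, not calibrated values.
def example_single_qubit_bringup(platform, mc, sequence, qc_pulse, ro_pulse):
    resonator_freq, _, _, smooth_rs, ds_rs = run_resonator_spectroscopy(
        platform, mc, sequence, ro_pulse,
        lowres_width=30e6, lowres_step=1e6,
        highres_width=2e6, highres_step=0.2e6,
        precision_width=1e6, precision_step=0.1e6)
    plot(smooth_rs, ds_rs, "resonator_spectroscopy_example", 0)

    qubit_freq, _, smooth_qs, ds_qs = run_qubit_spectroscopy(
        platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
        fast_start=-30e6, fast_end=30e6, fast_step=1e6,
        precision_start=-2e6, precision_end=2e6, precision_step=0.1e6)
    plot(smooth_qs, ds_qs, "qubit_spectroscopy_example", 1)

    rabi_dataset, qcm_gain = run_rabi_pulse_length(
        platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse)
    return resonator_freq, qubit_freq, rabi_dataset, qcm_gain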

def run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
                                       lowres_width, lowres_step, highres_width, highres_step,
                                       precision_width, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)

    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Resonator Spectroscopy Shifted Fast", soft_avg=platform.software_averages)
    platform.stop()

    shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + shifted_LO_frequency)
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Resonator Spectroscopy Shifted Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    shifted_max_ro_voltage = smooth_dataset.max() * 1e6
    print('\n')
    print(f"\nResonator Frequency = {shifted_frequency}")
    print(f"Maximum Voltage Measured = {shifted_max_ro_voltage} μV")

    return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset


# Helper classes: each one duck-types the Quantify Settable interface
# (label / unit / name / set) so MeasurementControl can sweep it.
class QCPulseLengthParameter():

    label = 'Qubit Control Pulse Length'
    unit = 'ns'
    name = 'qc_pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4


class QCPulseGainParameter():

    label = 'Qubit Control Gain'
    unit = '%'
    name = 'qc_pulse_gain'

    def __init__(self, qcm):
        self.qcm = qcm

    def set(self, value):
        self.qcm.gain = value / 100


class QCPulseAmplitudeParameter():

    label = 'Qubit Control Pulse Amplitude'
    unit = '%'
    name = 'qc_pulse_amplitude'

    def __init__(self, qc_pulse):
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.amplitude = value / 100


class T1WaitParameter():

    label = 'Time'
    unit = 'ns'
    name = 't1_wait'
    initial_value = 0

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.base_duration = qc_pulse.duration

    def set(self, value):
        # TODO: implement the following condition:
        # must be >= 4 ns and <= 65535
        # platform.delay_before_readout = value
        self.ro_pulse.start = self.base_duration + 4 + value


class RamseyWaitParameter():

    label = 'Time'
    unit = 'ns'
    name = 'ramsey_wait'
    initial_value = 0

    def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):
        self.ro_pulse = ro_pulse
        self.qc2_pulse = qc2_pulse
        self.pi_pulse_length = pi_pulse_length

    def set(self, value):
        self.qc2_pulse.start = self.pi_pulse_length // 2 + value
        self.ro_pulse.start = self.pi_pulse_length + value + 4


class SpinEchoWaitParameter():

    label = 'Time'
    unit = 'ns'
    name = 'spin_echo_wait'
    initial_value = 0

    def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):
        self.ro_pulse = ro_pulse
        self.qc2_pulse = qc2_pulse
        self.pi_pulse_length = pi_pulse_length

    def set(self, value):
        self.qc2_pulse.start = self.pi_pulse_length // 2 + value
        self.ro_pulse.start = 3 * self.pi_pulse_length // 2 + 2 * value + 4


class SpinEcho3PWaitParameter():

    label = 'Time'
    unit = 'ns'
    name = 'spin_echo_wait'
    initial_value = 0

    def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length):
        self.ro_pulse = ro_pulse
        self.qc2_pulse = qc2_pulse
        self.qc3_pulse = qc3_pulse
        self.pi_pulse_length = pi_pulse_length

    def set(self, value):
        self.qc2_pulse.start = self.pi_pulse_length // 2 + value
        self.qc3_pulse.start = (3 * self.pi_pulse_length) // 2 + 2 * value
        self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4


class QRPulseGainParameter():

    label = 'Qubit Readout Gain'
    unit = '%'
    name = 'ro_pulse_gain'

    def __init__(self, qrm):
        self.qrm = qrm

    def set(self, value):
        # The source text is truncated inside this method; the body below is
        # assumed by analogy with QCPulseGainParameter above.
        self.qrm.gain = value / 100
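
# Editor-added illustration (hypothetical, not referenced by any routine above):
# a new swept parameter, e.g. the readout pulse amplitude, would follow the same
# Settable shape (label / unit / name / set) as the helper classes above.
class ROPulseAmplitudeParameter():

    label = 'Readout Pulse Amplitude'
    unit = '%'
    name = 'ro_pulse_amplitude'

    def __init__(self, ro_pulse):
        self.ro_pulse = ro_pulse

    def set(self, value):
        # Same convention as QCPulseAmplitudeParameter: value is given in percent.
        self.ro_pulse.amplitude = value / 100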
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n 
ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n 
calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = 
os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n 
self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n 
platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n 
mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = 
platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import 
MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = 
InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator 
Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding 
pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - 
ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n 
self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4\n\nclass SpinEcho3PWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n \n def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.qc3_pulse = qc3_pulse\n self.pi_pulse_length = pi_pulse_length\n \n def set(self,value):", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, 
figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom 
quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor 
import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. .< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = 
dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for 
the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n 
start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 
'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4\n\nclass SpinEcho3PWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n \n def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.qc3_pulse = qc3_pulse\n self.pi_pulse_length = pi_pulse_length\n \n def set(self,value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, 
sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
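# --- Illustrative sketch (not part of the original diagnostics module) ---
# variable_resolution_scanrange above builds a symmetric, non-uniform frequency-offset axis:
# coarse steps in the outer wings and fine steps around zero detuning. This standalone copy
# prints the resulting array for small example numbers so the step change is easy to inspect.

import numpy as np

def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),   # coarse left wing
        np.arange(-highres_width, highres_width, highres_step),  # fine centre region
        np.arange(highres_width, lowres_width, lowres_step),     # coarse right wing
    ))

if __name__ == "__main__":
    scan = variable_resolution_scanrange(lowres_width=10, lowres_step=5,
                                         highres_width=4, highres_step=1)
    # [-10 -5 -4 -3 -2 -1 0 1 2 3 4 9] -- note the step size change at +/-4
    print(scan)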
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
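# --- Illustrative sketch (not part of the original diagnostics module) ---
# The spectroscopy routines above smooth the raw magnitude trace with a Savitzky-Golay filter
# before picking the peak (resonator, argmax) or the dip (qubit, argmin). This toy reproduces
# that step on synthetic data; the window/polyorder pair (25, 2) matches the resonator sweeps
# above, while the qubit sweep uses (11, 2). The Lorentzian test signal is invented here.

import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
freq = np.linspace(-5e6, 5e6, 201)                      # detuning axis, Hz
lorentzian = 1.0 / (1.0 + (freq / 1e6) ** 2)            # ideal resonance line
noisy = lorentzian + 0.05 * rng.standard_normal(freq.size)

smooth = savgol_filter(noisy, 25, 2)                    # window_length=25, polyorder=2
peak_index = smooth.argmax()
print("estimated resonance offset:", freq[peak_index], "Hz")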
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4\n\nclass SpinEcho3PWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n \n def __init__(self, 
ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.qc3_pulse = qc3_pulse\n self.pi_pulse_length = pi_pulse_length\n \n def set(self,value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.qc3_pulse.start = (3 * self.pi_pulse_length)//2 + 2 * value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n 
ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):", "type": "common" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, 
label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os", "type": "commited" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, 
dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step\n # >..< highres_step\n # ^ centre value = 0\n scanrange = np.concatenate(\n ( np.arange(-lowres_width,-highres_width,lowres_step),\n np.arange(-highres_width,highres_width,highres_step),\n np.arange(highres_width,lowres_width,lowres_step)\n )\n )\n return scanrange\n\ndef run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, \n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n #Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\ndef run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): \n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n scanrange = scanrange + platform.LO_qrm.get_frequency()\n\n mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])\n setpoints_gain = np.arange(10, 100, 10)\n mc.setpoints_grid([scanrange, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n #FIXME: Code Lorentzian fitting for cavity spec and punchout\n resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency \n print(f\"\\nResonator Frequency = {resonator_freq}\")\n print(f\"\\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}\")\n\n return resonator_freq, smooth_dataset, dataset\n\ndef run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, \n fast_start, fast_end, fast_step,\n precision_start, precision_end, precision_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.software_averages = 1\n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)\n 
platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\ndef run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(1, 400, 1))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset, platform.qcm.gain\n\ndef run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n #qubit pulse duration=200\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 3\n mc.settables(Settable(QCPulseGainParameter(platform.qcm)))\n mc.setpoints(np.arange(0, 100, 10))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseGainParameter(platform.qcm))])\n setpoints_length = np.arange(1, 400, 10)\n setpoints_gain = np.arange(0, 20, 1)\n mc.setpoints_grid([setpoints_length, setpoints_gain])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.software_averages = 1\n mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),\n Settable(QCPulseAmplitudeParameter(qc_pulse))])\n setpoints_length = np.arange(1, 1000, 2)\n setpoints_amplitude = np.arange(0, 100, 2)\n mc.setpoints_grid([setpoints_length, setpoints_amplitude])\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = 
platform.software_averages)\n # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain\n # platform.pi_pulse_length =\n # platform.pi_pulse_gain =\n platform.stop()\n \n return dataset\n\ndef run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, \n delay_before_readout_end, delay_before_readout_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, \n pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n\n mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Ramsey', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)\n platform.stop()\n \n return dataset\n\n# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout\ndef run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,\n pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,\n start_start, start_end, start_step):\n \n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n platform.qcm.gain = pi_pulse_gain\n mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))\n mc.setpoints(np.arange(start_start, start_end, start_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)\n platform.stop()\n\n return dataset\n\ndef run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,\n lowres_width, lowres_step, highres_width, highres_step,\n precision_width, precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + 
qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4\n\nclass SpinEcho3PWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'spin_echo_wait'\n initial_value = 0\n \n def __init__(self, 
ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.qc3_pulse = qc3_pulse\n self.pi_pulse_length = pi_pulse_length\n \n def set(self,value):\n self.qc2_pulse.start = self.pi_pulse_length//2 + value\n self.qc3_pulse.start = (3 * self.pi_pulse_length)//2 + 2 * value\n self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4\n\nclass QRPulseGainParameter():\n\n label = 'Qubit Readout Gain'\n unit = '%'\n name = 'ro_pulse_gain'\n", "type": "non_informative" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()", "type": "non_informative" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from 
datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n", "type": "non_informative" }, { "content": "import pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport yaml\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\nfrom quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\n\n# TODO: Check why this set_datadir is needed\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\ndef backup_config_file():\n import os\n import shutil\n import errno\n from datetime import datetime\n original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n now = datetime.now()\n now = now.strftime(\"%d%m%Y%H%M%S\")\n destination_file_name = \"tiiq_\" + now + \".yml\" \n target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name))\n\n try:\n print(\"Copying file: \" + original)\n print(\"Destination file\" + target)\n shutil.copyfile(original, target)\n print(\"Platform settings backup done\")\n except IOError as e:\n # ENOENT(2): file does not exist, raised also on missing dest parent dir\n if e.errno != errno.ENOENT:\n raise\n # try creating parent directories\n os.makedirs(os.path.dirname(target))\n shutil.copy(original, target)\n\ndef get_config_parameter(dictID, dictID1, key):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path) as file:\n settings = yaml.safe_load(file)\n file.close() \n\n if (not dictID1):\n return settings[dictID][key]\n else:\n return settings[dictID][dictID1][key]\n\ndef save_config_parameter(dictID, dictID1, key, value):\n import os\n calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml'))\n with open(calibration_path, \"r\") as file:\n settings = yaml.safe_load(file)\n file.close()\n \n if (not dictID1):\n settings[dictID][key] = value\n print(\"Saved value: \" + str(settings[dictID][key]))\n\n else:\n settings[dictID][dictID1][key] = value\n print(\"Saved value: \" + str(settings[dictID][dictID1][key]))\n\n with open(calibration_path, \"w\") as file:\n settings = 
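# --- Illustrative sketch (not part of the original diagnostics module) ---
# get_config_parameter / save_config_parameter above are a thin YAML round-trip on the tiiq.yml
# runcard: safe_load the whole file, read or update a (possibly nested) key, then dump it back
# with sort_keys=False so the original key order is preserved. The snippet below performs the
# same round-trip against a temporary file instead of the real runcard path; the example keys
# and values are invented.

import os
import tempfile
import yaml

settings = {"settings": {"resonator_freq": 7.8e9},
            "instruments": {"qcm": {"gain": 0.5}}}

path = os.path.join(tempfile.mkdtemp(), "runcard_example.yml")
with open(path, "w") as f:
    yaml.dump(settings, f, sort_keys=False, indent=4)

# read a nested value (analogous to get_config_parameter("instruments", "qcm", "gain"))
with open(path) as f:
    loaded = yaml.safe_load(f)
print(loaded["instruments"]["qcm"]["gain"])

# update it and write back (analogous to save_config_parameter("instruments", "qcm", "gain", 0.6))
loaded["instruments"]["qcm"]["gain"] = 0.6
with open(path, "w") as f:
    yaml.dump(loaded, f, sort_keys=False, indent=4)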
yaml.dump(settings, file, sort_keys=False, indent=4)\n file.close()\n\ndef plot(smooth_dataset, dataset, label, type):\n if (type == 0): #cavity plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\n if (type == 1): #qubit spec, rabi, ramsey, t1 plots\n fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61))\n ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')\n ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')\n ax.title.set_text(label)\n ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')\n plt.savefig(pathlib.Path(\"data\") / f\"{label}.pdf\")\n return\n\ndef create_measurement_control(name):\n import os\n if os.environ.get(\"ENABLE_PLOTMON\", True):\n mc = MeasurementControl(f'MC {name}')\n from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt\n plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}')\n plotmon.tuids_max_num(3)\n mc.instr_plotmon(plotmon.name)\n from quantify_core.visualization.instrument_monitor import InstrumentMonitor\n insmon = InstrumentMonitor(f\"Instruments Monitor {name}\")\n mc.instrument_monitor(insmon.name)\n return mc, plotmon, insmon\n else:\n mc = MeasurementControl(f'MC {name}')\n return mc, None, None\n\n\nclass ROController():\n # Quantify Gettable Interface Implementation\n label = ['Amplitude', 'Phase','I','Q']\n unit = ['V', 'Radians','V','V']\n name = ['A', 'Phi','I','Q']\n\n def __init__(self, platform, sequence):\n self.platform = platform\n self.sequence = sequence\n\n def get(self):\n return self.platform.execute(self.sequence)\n\n\ndef variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):\n #[. . . . . .][...................]0[...................][. . . . . .]\n #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------]\n #>. 
.< lowres_step
    #                           >..<  highres_step
    #                                 ^ centre value = 0
    scanrange = np.concatenate(
        (   np.arange(-lowres_width, -highres_width, lowres_step),
            np.arange(-highres_width, highres_width, highres_step),
            np.arange(highres_width, lowres_width, lowres_step)
        )
    )
    return scanrange

def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
                               lowres_width, lowres_step, highres_width, highres_step,
                               precision_width, precision_step):
    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()
    platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])
    avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    max_ro_voltage = smooth_dataset.max() * 1e6
    print(f"\nResonator Frequency = {resonator_freq}")
    return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset

def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step):
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    scanrange = scanrange + platform.LO_qrm.get_frequency()

    mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])
    setpoints_gain = np.arange(10, 100, 10)
    mc.setpoints_grid([scanrange, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    platform.LO_qcm.off()
    dataset = mc.run("Punchout", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    # FIXME: Code Lorentzian fitting for cavity spec and punchout
    resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values] + ro_pulse.frequency
    print(f"\nResonator Frequency = {resonator_freq}")
    print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}")

    return resonator_freq, smooth_dataset, dataset

def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse,
                           fast_start, fast_end, fast_step,
                           precision_start, precision_end, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.software_averages = 1
    # Fast Sweep
    fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages)
    platform.stop()

    # Precision Sweep
    platform.software_averages = 1
    precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)
    mc.settables(platform.LO_qcm.device.frequency)
    mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)
    qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency
    min_ro_voltage = smooth_dataset.min() * 1e6

    return qubit_freq, min_ro_voltage, smooth_dataset, dataset

def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(1, 400, 1))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)
    platform.stop()

    return dataset, platform.qcm.gain

def run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    # qubit pulse duration=200
    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 3
    mc.settables(Settable(QCPulseGainParameter(platform.qcm)))
    mc.setpoints(np.arange(0, 100, 10))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseGainParameter(platform.qcm))])
    setpoints_length = np.arange(1, 400, 10)
    setpoints_gain = np.arange(0, 20, 1)
    mc.setpoints_grid([setpoints_length, setpoints_gain])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset

def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.software_averages = 1
    mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),
                  Settable(QCPulseAmplitudeParameter(qc_pulse))])
    setpoints_length = np.arange(1, 1000, 2)
    setpoints_amplitude = np.arange(0, 100, 2)
    mc.setpoints_grid([setpoints_length, setpoints_amplitude])
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)
    # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain
    # platform.pi_pulse_length =
    # platform.pi_pulse_gain =
    platform.stop()

    return dataset

def run_t1(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
           pi_pulse_gain, pi_pulse_duration, delay_before_readout_start,
           delay_before_readout_end, delay_before_readout_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))
    mc.setpoints(np.arange(delay_before_readout_start,
                           delay_before_readout_end,
                           delay_before_readout_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('T1', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
               pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain

    mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Ramsey', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse,
                  pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                  start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

# Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout
def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse,
                          pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,
                          start_start, start_end, start_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)
    platform.qcm.gain = pi_pulse_gain
    mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))
    mc.setpoints(np.arange(start_start, start_end, start_step))
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)
    platform.stop()

    return dataset

def run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse,
                                       lowres_width, lowres_step, highres_width, highres_step,
                                       precision_width, precision_step):

    platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)
    platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)

    # Fast Sweep
    platform.software_averages = 1
    scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Resonator Spectroscopy Shifted Fast", soft_avg=platform.software_averages)
    platform.stop()

    shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]

    # Precision Sweep
    platform.software_averages = 1
    scanrange = np.arange(-precision_width, precision_width, precision_step)
    mc.settables(platform.LO_qrm.device.frequency)
    mc.setpoints(scanrange + shifted_LO_frequency)
    mc.gettables(Gettable(ROController(platform, sequence)))
    platform.start()
    dataset = mc.run("Resonator Spectroscopy Shifted Precision", soft_avg=platform.software_averages)
    platform.stop()

    smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
    shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
    shifted_max_ro_voltage = smooth_dataset.max() * 1e6
    print('\n')
    print(f"\nResonator Frequency = {shifted_frequency}")
    print(f"Maximum Voltage Measured = {shifted_max_ro_voltage} μV")

    return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset


# help classes
class QCPulseLengthParameter():

    label = 'Qubit Control Pulse Length'
    unit = 'ns'
    name = 'qc_pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4

class QCPulseGainParameter():

    label = 'Qubit Control Gain'
    unit = '%'
    name = 'qc_pulse_gain'

    def __init__(self, qcm):
        self.qcm = qcm

    def set(self, value):
        self.qcm.gain = value / 100

class QCPulseAmplitudeParameter():

    label = 'Qubit Control Pulse Amplitude'
    unit = '%'
    name = 'qc_pulse_amplitude'

    def __init__(self, qc_pulse):
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.amplitude = value / 100

class T1WaitParameter():
    label = 'Time'
    unit = 'ns'
    name = 't1_wait'
    initial_value = 0
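# --- Editorial usage note (illustrative sketch, not part of the original module) ---
# The routines above are meant to be chained: run_resonator_spectroscopy returns
# resonator_freq, run_qubit_spectroscopy takes it and returns qubit_freq, and both
# then feed the Rabi, T1, Ramsey and spin-echo routines. A minimal sketch, assuming
# the calling script already provides `platform`, `sequence` and `ro_pulse`, and
# using placeholder (uncalibrated) sweep widths and steps:
#
#   mc, plotmon, insmon = create_measurement_control("diagnostics")
#   resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = \
#       run_resonator_spectroscopy(platform, mc, sequence, ro_pulse,
#                                  lowres_width=30e6, lowres_step=1e6,
#                                  highres_width=1e6, highres_step=0.1e6,
#                                  precision_width=0.5e6, precision_step=0.02e6)
#   plot(smooth_dataset, dataset, "resonator_spectroscopy", type=0)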
precision_step):\n\n platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)\n\n # Fast Sweep\n platform.software_averages = 1\n scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)\n platform.stop()\n\n shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]\n\n # Precision Sweep\n platform.software_averages = 1\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + shifted_LO_frequency)\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", soft_avg=platform.software_averages)\n platform.stop()\n\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n shifted_max_ro_voltage = smooth_dataset.max() * 1e6\n print('\\n')\n print(f\"\\nResonator Frequency = {shifted_frequency}\")\n print(f\"Maximum Voltage Measured = {shifted_max_ro_voltage} μV\")\n\n return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset\n\n\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass QCPulseGainParameter():\n\n label = 'Qubit Control Gain'\n unit = '%'\n name = 'qc_pulse_gain'\n\n def __init__(self, qcm):\n self.qcm = qcm\n\n def set(self,value):\n self.qcm.gain = value / 100\n\nclass QCPulseAmplitudeParameter():\n\n label = 'Qubit Control Pulse Amplitude'\n unit = '%'\n name = 'qc_pulse_amplitude'\n\n def __init__(self, qc_pulse):\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.amplitude = value / 100\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value\n self.ro_pulse.start = self.base_duration + 4 + value\n\nclass RamseyWaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 'ramsey_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length):\n self.ro_pulse = ro_pulse\n self.qc2_pulse = qc2_pulse\n self.pi_pulse_length = pi_pulse_length\n\n def set(self, value):\n self.qc2_pulse.start = self.pi_pulse_length // 2 + value\n self.ro_pulse.start = self.pi_pulse_length + value + 4\n\nclass SpinEchoWaitParameter():\n label = 'Time'\n unit = 'ns'", "type": "random" } ]
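The flattened diagnostics source above defines variable_resolution_scanrange, which stitches a fine scan around zero onto coarse outer wings. A re-indented, runnable copy with a small usage example follows; only numpy is assumed, and the widths and steps in the demo are hypothetical.

import numpy as np

def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    # Coarse steps on the outer wings, fine steps around the centre (0):
    # [. . lowres . .][.. highres ..] 0 [.. highres ..][. . lowres . .]
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),
        np.arange(-highres_width, highres_width, highres_step),
        np.arange(highres_width, lowres_width, lowres_step),
    ))

if __name__ == "__main__":
    # Hypothetical widths/steps in Hz, chosen only to show the shape of the result.
    scan = variable_resolution_scanrange(30e6, 1e6, 5e6, 0.1e6)
    print(scan.size, scan.min(), scan.max())

In the sweeps above, this offset grid is added to the local-oscillator frequency (scanrange + platform.LO_qrm.get_frequency()) before being handed to mc.setpoints.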
[ " mc.gettables(Gettable(ROController(platform, sequence)))", " scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)", " mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)),", " Settable(QCPulseAmplitudeParameter(qc_pulse))])", " mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse)))", " ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0')", " ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1')", " mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length)))", " ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2')", " ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2')", " mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length))", " mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))", " shutil.copy(original, target)", " file.close()", " platform.software_averages = 1", " mc.settables(platform.LO_qrm.device.frequency)", " mc.setpoints(scanrange + platform.LO_qrm.get_frequency())", " platform.LO_qcm.off()", " dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=platform.software_averages)", " platform.stop()", " platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])", " dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=platform.software_averages)", " scanrange = scanrange + platform.LO_qrm.get_frequency()", " mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))])", " dataset = mc.run(\"Punchout\", soft_avg=platform.software_averages)", " platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency)", " mc.settables(platform.LO_qcm.device.frequency)", " mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())", " dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=platform.software_averages)", " mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())", " dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=platform.software_averages)", " platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency)", " platform.software_averages = 3", " dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages)", " return dataset, platform.qcm.gain", " platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency)", " mc.settables(Settable(QCPulseGainParameter(platform.qcm)))", " dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages)", " Settable(QCPulseGainParameter(platform.qcm))])", " dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages)", " pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, ", " platform.qcm.gain = pi_pulse_gain", " dataset = mc.run('T1', soft_avg = platform.software_averages)", " pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step):", " mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration)))", " dataset = mc.run('Ramsey', soft_avg = platform.software_averages)", " pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude,", " dataset = mc.run('Spin Echo', soft_avg = platform.software_averages)", " dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages)", " dataset = mc.run(\"Resonator Spectroscopy Shifted Fast\", soft_avg=platform.software_averages)", " dataset = mc.run(\"Resonator Spectroscopy Shifted Precision\", 
soft_avg=platform.software_averages)", " self.qc_pulse.duration = value", " self.qcm = qcm", " self.qcm.gain = value / 100", " self.base_duration = qc_pulse.duration", " self.qrm = qrm", " self.qrm.gain = value / 100", " platform.start()", " self.ro_pulse.start = value + 4", " self.qc2_pulse.start = self.pi_pulse_length // 2 + value", " self.ro_pulse.start = self.pi_pulse_length + value + 4", " self.qc2_pulse.start = self.pi_pulse_length//2 + value", " self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4", " self.qc3_pulse.start = (3 * self.pi_pulse_length)//2 + 2 * value", " self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4", " return self.platform.execute(self.sequence)", " if os.environ.get(\"ENABLE_PLOTMON\", True):", " def __init__(self, qrm):", "", "def save_config_parameter(dictID, dictID1, key, value):", " def __init__(self, ro_pulse, qc_pulse):", " label = 'Qubit Control Pulse Length'", " shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values]", " mc.setpoints_grid([setpoints_length, setpoints_amplitude])", " if e.errno != errno.ENOENT:", " name = 'spin_echo_wait'" ]
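Many of the ground-truth lines just above come from small parameter classes (QCPulseLengthParameter, T1WaitParameter, ...) that quantify's Settable wraps: a plain object exposing label/unit/name and a set() method. Below is a minimal runnable sketch of that pattern; SimplePulse is a hypothetical stand-in so the demo runs without qibolab or hardware.

from dataclasses import dataclass

@dataclass
class SimplePulse:                     # hypothetical stand-in for a qibolab pulse
    start: int = 0
    duration: int = 60

class QCPulseLengthParameter:
    # Metadata used for axis labels when this object is wrapped in Settable.
    label = 'Qubit Control Pulse Length'
    unit = 'ns'
    name = 'qc_pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        # Sweep the drive-pulse length and keep a 4 ns gap before readout.
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4

qc_pulse, ro_pulse = SimplePulse(), SimplePulse()
QCPulseLengthParameter(ro_pulse, qc_pulse).set(120)
print(qc_pulse.duration, ro_pulse.start)   # 120 124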
METASEP
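The resonator sweeps above smooth the measured trace with scipy's savgol_filter (window 25, order 2), read the resonance off the maximum of the smoothed curve, and add back the readout-pulse frequency. A self-contained sketch of that post-processing step on synthetic data; every number below is hypothetical.

import numpy as np
from scipy.signal import savgol_filter

# Synthetic resonator trace: a Lorentzian peak plus noise.
freqs = np.linspace(7.79e9, 7.81e9, 201)                 # swept LO frequencies, Hz
f0, width = 7.798e9, 1.5e6
trace = 1.0 / (1.0 + ((freqs - f0) / width) ** 2)
noisy = trace + np.random.default_rng(0).normal(0.0, 0.05, freqs.size)

# Same smoothing as the sweeps above, then peak pick and IF correction.
smooth = savgol_filter(noisy, 25, 2)
ro_pulse_frequency = 20e6                                 # hypothetical readout IF
resonator_freq = freqs[smooth.argmax()] + ro_pulse_frequency
print(f"Resonator frequency ~ {resonator_freq:.4e} Hz")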
1
qiboteam__qibolab
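Both the escaped block above and the qibolab example modules in this record expose readout to MeasurementControl through a small ROController gettable whose get() simply forwards to platform.execute(sequence). A stand-alone sketch of that interface; DummyPlatform is a hypothetical substitute for the real instruments.

class ROController:
    # Quantify Gettable interface: one entry per returned value.
    label = ['Amplitude', 'Phase', 'I', 'Q']
    unit = ['V', 'Radians', 'V', 'V']
    name = ['A', 'Phi', 'I', 'Q']

    def __init__(self, platform, sequence):
        self.platform = platform
        self.sequence = sequence

    def get(self):
        return self.platform.execute(self.sequence)

class DummyPlatform:                    # hypothetical, returns fixed readings
    def execute(self, sequence):
        return 0.001, 0.2, 0.0007, 0.0007   # amplitude, phase, i, q

print(ROController(DummyPlatform(), sequence=None).get())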
qiboteam__qibolab METASEP examples/tii_single_qubit/main.py METASEP import yaml import time from diagnostics import run_resonator_spectroscopy, \ run_qubit_spectroscopy, \ run_rabi_pulse_length, \ run_rabi_pulse_gain, \ run_rabi_pulse_length_and_gain, \ run_rabi_pulse_length_and_amplitude, \ run_t1, \ run_ramsey, \ run_spin_echo if __name__ == "__main__": with open("settings.yaml", "r") as file: settings = yaml.safe_load(file) resonator_freq = 7798070000.0 qubit_freq = 8726500000.0 pi_pulse_length = 45 pi_pulse_gain = 0.14 pi_pulse_amplitude = 0.9 print("\nRun resonator spectroscopy.\n") resonator_freq, _ = run_resonator_spectroscopy(**settings["resonator_spectroscopy"]) print("\nRun qubit spectroscopy.\n") qubit_freq, _ = run_qubit_spectroscopy(resonator_freq, **settings["qubit_spectroscopy"]) print("\nRun Rabi pulse length.\n") run_rabi_pulse_length(resonator_freq, qubit_freq) print("\nRun Rabi pulse gain.\n") run_rabi_pulse_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and gain.\n") run_rabi_pulse_length_and_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and amplitude.\n") run_rabi_pulse_length_and_amplitude(resonator_freq, qubit_freq) print("\nRun t1.\n") run_t1(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, **settings["t1"]) print("\nRun ramsey.\n") run_ramsey(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["ramsey"]) print("\nRun Spin Echo.\n") run_spin_echo(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["spin_echo"]) print("\nDiagnostics completed.\n") time.sleep(360) examples/tii_single_qubit/fitting.py METASEP import pathlib import numpy as np from scipy.optimize import curve_fit import matplotlib.pyplot as plt import os from quantify_core.analysis.base_analysis import BaseAnalysis from quantify_core.data.handling import set_datadir import lmfit import numpy as np def lorentzian_fit(label, peak, name): #label = directory where hdf5 data file generated by MC is located. #label=last --> Read most recent hdf5 #label=/path/to/directory/ --> read the hdf5 data file contained in "label" voltage, x_axis, data, d = data_post(label) frequency = x_axis #Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(resonator_peak) #Guess parameters for Lorentzian max or min #to guess center if peak == max: guess_center = frequency[np.argmax(voltage)] #Argmax = Returns the indices of the maximum values along an axis. else: guess_center = frequency[np.argmin(voltage)] #Argmin = Returns the indices of the minimum values along an axis. 
#to guess the sigma if peak == max: voltage_min_i = np.argmin(voltage) frequency_voltage_min = frequency[voltage_min_i] guess_sigma = abs(frequency_voltage_min - guess_center) #500KHz*1e-9 else: guess_sigma = 5e-03 #500KHz*1e-9 #to guess the amplitude if peak == max: voltage_max = np.max(voltage) guess_amp = voltage_max*guess_sigma*np.pi else: voltage_min = np.min(voltage) guess_amp = -voltage_min*guess_sigma*np.pi #to guess the offset if peak == max: guess_offset = 0 else: guess_offset = voltage[0]*-2.5*1e5 #Add guessed parameters to the model if peak == max: model_Q.set_param_hint('center',value=guess_center,vary=True) else: model_Q.set_param_hint('center',value=guess_center,vary=False) model_Q.set_param_hint('sigma',value=guess_sigma, vary=True) model_Q.set_param_hint('amplitude',value=guess_amp, vary=True) model_Q.set_param_hint('offset',value=guess_offset, vary=True) guess_parameters = model_Q.make_params() guess_parameters #fit the model with the data and guessed parameters fit_res = model_Q.fit(data=voltage,frequency=frequency,params=guess_parameters) #print(fit_res.fit_report()) #fit_res.best_values #get the values for postprocessing and for legend. f0 = fit_res.best_values['center']/1e9 BW = (fit_res.best_values['sigma']*2)/1e9 Q = abs(f0/BW) #plot the fitted curve dummy_frequencies = np.linspace(np.amin(frequency),np.amax(frequency),101) fit_fine = resonator_peak(dummy_frequencies,**fit_res.best_values) fig,ax = plt.subplots(1,1,figsize=(8,3)) ax.plot(data.x0,data.y0*1e3,'o',label='Data') ax.plot(dummy_frequencies,fit_fine*1e3,'r-', label=r"Fit $f_0$ ={:.4f} GHz" "\n" " $Q$ ={:.0f}".format(f0,Q)) ax.set_ylabel('Integrated Voltage (mV)') ax.set_xlabel('Frequency (GHz)') ax.legend() plt.show() fig.savefig(pathlib.Path("data") / f"{name}.pdf", format='pdf') #fit_res.plot_fit(show_init=True) return f0, BW, Q def rabi_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(rabi, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = rabi(dataset['x0'].values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) rabi_oscillations_pi_pulse_min_voltage = smooth_dataset.min() * 1e6 t1 = 1.0 / popt[4] #double check T1 return smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 def t1_fit(dataset): pguess = [ max(dataset['y0'].values), (max(dataset['y0'].values) - min(dataset['y0'].values)), 1/250 ] popt, pcov = curve_fit(exp, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = exp(dataset['x0'].values, *popt) t1 = abs(1/popt[2]) return smooth_dataset, t1 def ramsey_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(ramsey, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = ramsey(dataset['x0'].values, *popt) delta_frequency = popt[2] t2 = 1.0 / popt[4] return smooth_dataset, delta_frequency, t2 def resonator_peak(frequency,amplitude,center,sigma,offset): #http://openafox.com/science/peak-function-derivations.html return (amplitude/np.pi) * (sigma/((frequency-center)**2 + sigma**2) + offset) def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary parameter T_2 : 
1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def exp(x,*p) : return p[0] - p[1]*np.exp(-1 * x * p[2]) #Read last hdf5 file generated by the mc or specify the directory def data_post(dir = "last"): if dir == "last": #get last measured file directory = 'data/quantify' directory = max([subdir for subdir, dirs, files in os.walk(directory)], key=os.path.getmtime) label = os.path.basename(os.path.normpath(directory)) else: label = dir set_datadir('data/quantify') d = BaseAnalysis(tuid=label) d.run() data = d.dataset # #clean the array arr1 = data.y0; voltage = [None] * len(arr1); for i in range(0, len(arr1)): voltage[i] = float(arr1[i]); arr1 = data.x0; x_axis = [None] * len(arr1); for i in range(0, len(arr1)): x_axis[i] = float(arr1[i]); plt.plot(x_axis,voltage) #plt.show() return voltage, x_axis, data, d examples/tii_single_qubit/diagnostics.py METASEP import pathlib import numpy as np import matplotlib.pyplot as plt import yaml import fitting from qibolab import Platform # TODO: Have a look in the documentation of ``MeasurementControl`` from quantify_core.measurement import MeasurementControl from quantify_core.measurement.control import Gettable, Settable from quantify_core.data.handling import set_datadir from scipy.signal import savgol_filter from qibolab.pulses import Pulse, ReadoutPulse from qibolab.circuit import PulseSequence from qibolab.pulse_shapes import Rectangular, Gaussian # TODO: Check why this set_datadir is needed set_datadir(pathlib.Path(__file__).parent / "data" / "quantify") def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step): #[. . . . . .][...................]0[...................][. . . . . .] #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------] #>. 
.< lowres_step # >..< highres_step # ^ centre value = 0 scanrange = np.concatenate( ( np.arange(-lowres_width,-highres_width,lowres_step), np.arange(-highres_width,highres_width,highres_step), np.arange(highres_width,lowres_width,lowres_step) ) ) return scanrange def backup_config_file(platform): import os import shutil import errno from datetime import datetime original = str(platform.runcard) now = datetime.now() now = now.strftime("%d%m%Y%H%M%S") destination_file_name = "tiiq_" + now + ".yml" target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name)) try: print("Copying file: " + original) print("Destination file" + target) shutil.copyfile(original, target) print("Platform settings backup done") except IOError as e: # ENOENT(2): file does not exist, raised also on missing dest parent dir if e.errno != errno.ENOENT: raise # try creating parent directories os.makedirs(os.path.dirname(target)) shutil.copy(original, target) def get_config_parameter(dictID, dictID1, key): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml')) with open(calibration_path) as file: settings = yaml.safe_load(file) file.close() if (not dictID1): return settings[dictID][key] else: return settings[dictID][dictID1][key] def save_config_parameter(dictID, dictID1, key, value): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'tiiq.yml')) with open(calibration_path, "r") as file: settings = yaml.safe_load(file) file.close() if (not dictID1): settings[dictID][key] = value print("Saved value: " + str(settings[dictID][key])) else: settings[dictID][dictID1][key] = value print("Saved value: " + str(settings[dictID][dictID1][key])) with open(calibration_path, "w") as file: settings = yaml.dump(settings, file, sort_keys=False, indent=4) file.close() def plot(smooth_dataset, dataset, label, type): if (type == 0): #cavity plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return if (type == 1): #qubit spec, rabi, ramsey, t1 plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return def plot_qubit_states(gnd_results, exc_results): plt.figure(figsize=[4,4]) # Plot all the results # All results from the gnd_schedule are plotted in blue plt.scatter(np.real(gnd_results), np.imag(gnd_results), s=5, cmap='viridis', c='blue', alpha=0.5, label='state_0') # All results from the exc_schedule are plotted in red plt.scatter(np.real(exc_results), np.imag(exc_results), s=5, cmap='viridis', c='red', alpha=0.5, label='state_1') # Plot a large dot for the average result of the 0 and 1 states. 
mean_gnd = np.mean(gnd_results) # takes mean of both real and imaginary parts mean_exc = np.mean(exc_results) plt.scatter(np.real(mean_gnd), np.imag(mean_gnd), s=200, cmap='viridis', c='black',alpha=1.0, label='state_0_mean') plt.scatter(np.real(mean_exc), np.imag(mean_exc), s=200, cmap='viridis', c='black',alpha=1.0, label='state_1_mean') plt.ylabel('I [a.u.]', fontsize=15) plt.xlabel('Q [a.u.]', fontsize=15) plt.title("0-1 discrimination", fontsize=15) plt.show() def create_measurement_control(name): import os if os.environ.get("ENABLE_PLOTMON", True): mc = MeasurementControl(f'MC {name}') from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}') mc.instr_plotmon(plotmon.name) from quantify_core.visualization.instrument_monitor import InstrumentMonitor insmon = InstrumentMonitor(f"Instruments Monitor {name}") mc.instrument_monitor(insmon.name) return mc, plotmon, insmon else: mc = MeasurementControl(f'MC {name}') return mc, plotmon, insmon # TODO: be able to choose which windows are opened and remember their sizes and dimensions class Diagnostics(): def __init__(self, platform: Platform): self.platform = platform self.mc, self.pl, self.ins = create_measurement_control('Diagnostics') def load_settings(self): # Load diagnostics settings with open("diagnostics.yml", "r") as file: return yaml.safe_load(file) def run_resonator_spectroscopy(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1]) qc_pulse_settings = ps['qc_spectroscopy_pulse'] qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pulse) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['resonator_spectroscopy'] lowres_width = ds['lowres_width'] lowres_step = ds['lowres_step'] highres_width = ds['highres_width'] highres_step = ds['highres_step'] precision_width = ds['precision_width'] precision_step = ds['precision_step'] #Fast Sweep scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=1) platform.stop() platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values]) avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6 # Precision Sweep scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=software_averages) platform.stop() # Fitting smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency max_ro_voltage = smooth_dataset.max() * 1e6 f0, BW, Q = fitting.lorentzian_fit("last", max, "Resonator_spectroscopy") resonator_freq = (f0*1e9 + 
ro_pulse.frequency) print(f"\nResonator Frequency = {resonator_freq}") return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset def run_punchout(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1]) qc_pulse_settings = ps['qc_spectroscopy_pulse'] qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pulse) sequence.add(ro_pulse) ds = self.load_settings()['punchout'] self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['punchout'] precision_width = ds['precision_width'] precision_step = ds['precision_step'] scanrange = np.arange(-precision_width, precision_width, precision_step) scanrange = scanrange + platform.LO_qrm.get_frequency() mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))]) setpoints_gain = np.arange(10, 100, 10) mc.setpoints_grid([scanrange, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Punchout", soft_avg=software_averages) platform.stop() # Fitting smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) #FIXME: Code Lorentzian fitting for cavity spec and punchout resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency print(f"\nResonator Frequency = {resonator_freq}") print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}") return resonator_freq, smooth_dataset, dataset def run_qubit_spectroscopy(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1]) qc_pulse_settings = ps['qc_spectroscopy_pulse'] qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pulse) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['qubit_spectroscopy'] fast_start = ds['fast_start'] fast_end = ds['fast_end'] fast_step = ds['fast_step'] precision_start = ds['precision_start'] precision_end = ds['precision_end'] precision_step = ds['precision_step'] # Fast Sweep fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step) mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=1) platform.stop() # Precision Sweep platform.software_averages = 1 precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step) mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=software_averages) platform.stop() # Fitting smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2) qubit_freq = 
dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency min_ro_voltage = smooth_dataset.min() * 1e6 print(f"\nQubit Frequency = {qubit_freq}") plot(smooth_dataset, dataset, "Qubit_Spectroscopy", 1) print("Qubit freq ontained from MC results: ", qubit_freq) f0, BW, Q = fitting.lorentzian_fit("last", min, "Qubit_Spectroscopy") qubit_freq = (f0*1e9 - qc_pulse.frequency) print("Qubit freq ontained from fitting: ", qubit_freq) return qubit_freq, min_ro_voltage, smooth_dataset, dataset def run_rabi_pulse_length(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1]) qc_pulse_settings = ps['qc_spectroscopy_pulse'] qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pulse) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['rabi_pulse_length'] pulse_duration_start = ds['pulse_duration_start'] pulse_duration_end = ds['pulse_duration_end'] pulse_duration_step = ds['pulse_duration_step'] mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse))) mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages) platform.stop() # Fitting pi_pulse_amplitude = qc_pulse.amplitude smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset) pi_pulse_gain = platform.qcm.gain plot(smooth_dataset, dataset, "Rabi_pulse_length", 1) print(f"\nPi pulse duration = {pi_pulse_duration}") print(f"\nPi pulse amplitude = {pi_pulse_amplitude}") #Check if the returned value from fitting is correct. print(f"\nPi pulse gain = {pi_pulse_gain}") #Needed? It is equal to the QCM gain when performing a Rabi. 
print(f"\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}") print(f"\nT1 = {t1}") return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 def run_rabi_pulse_gain(self, platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): #qubit pulse duration=200 platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 3 mc.settables(Settable(QCPulseGainParameter(platform.qcm))) mc.setpoints(np.arange(0, 100, 10)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages) platform.stop() return dataset def run_rabi_pulse_length_and_gain(self, platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseGainParameter(platform.qcm))]) setpoints_length = np.arange(1, 400, 10) setpoints_gain = np.arange(0, 20, 1) mc.setpoints_grid([setpoints_length, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset def run_rabi_pulse_length_and_amplitude(self, platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseAmplitudeParameter(qc_pulse))]) setpoints_length = np.arange(1, 1000, 2) setpoints_amplitude = np.arange(0, 100, 2) mc.setpoints_grid([setpoints_length, setpoints_amplitude]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset # T1: RX(pi) - wait t(rotates z) - readout def run_t1(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] start = 0 frequency = ps['pi_pulse_frequency'] amplitude = ps['pi_pulse_amplitude'] duration = ps['pi_pulse_duration'] phase = 0 shape = eval(ps['pi_pulse_shape']) qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pi_pulse) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['t1'] delay_before_readout_start = ds['delay_before_readout_start'] delay_before_readout_end = ds['delay_before_readout_end'] delay_before_readout_step = ds['delay_before_readout_step'] 
mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse))) mc.setpoints(np.arange(delay_before_readout_start, delay_before_readout_end, delay_before_readout_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('T1', soft_avg = software_averages) platform.stop() # Fitting smooth_dataset, t1 = fitting.t1_fit(dataset) plot(smooth_dataset, dataset, "t1", 1) print(f'\nT1 = {t1}') return t1, smooth_dataset, dataset # Ramsey: RX(pi/2) - wait t(rotates z) - RX(pi/2) - readout def run_ramsey(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] start = 0 frequency = ps['pi_pulse_frequency'] amplitude = ps['pi_pulse_amplitude'] duration = ps['pi_pulse_duration'] phase = 0 shape = eval(ps['pi_pulse_shape']) qc_pi_half_pulse_1 = Pulse(start, duration, amplitude/2, frequency, phase, shape) qc_pi_half_pulse_2 = Pulse(qc_pi_half_pulse_1.start + qc_pi_half_pulse_1.duration, duration, amplitude/2, frequency, phase, shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pi_half_pulse_1) sequence.add(qc_pi_half_pulse_2) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['ramsey'] delay_between_pulses_start = ds['delay_between_pulses_start'] delay_between_pulses_end = ds['delay_between_pulses_end'] delay_between_pulses_step = ds['delay_between_pulses_step'] mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc_pi_half_pulse_2, platform.settings['settings']['pi_pulse_duration']))) mc.setpoints(np.arange(delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Ramsey', soft_avg = software_averages) platform.stop() # Fitting smooth_dataset, delta_frequency, t2 = fitting.ramsey_fit(dataset) plot(smooth_dataset, dataset, "Ramsey", 1) print(f"\nDelta Frequency = {delta_frequency}") print(f"\nT2 = {t2} ns") return t2, smooth_dataset, dataset # Spin Echo: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - readout def run_spin_echo(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] start = 0 frequency = ps['pi_pulse_frequency'] amplitude = ps['pi_pulse_amplitude'] duration = ps['pi_pulse_duration'] phase = 0 shape = eval(ps['pi_pulse_shape']) qc_pi_half_pulse = Pulse(start, duration, amplitude/2, frequency, phase, shape) qc_pi_pulse = Pulse(qc_pi_half_pulse.start + qc_pi_half_pulse.duration, duration, amplitude, frequency, phase, shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pi_half_pulse) sequence.add(qc_pi_pulse) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['spin_echo'] delay_between_pulses_start = ds['delay_between_pulses_start'] delay_between_pulses_end = ds['delay_between_pulses_end'] delay_between_pulses_step = ds['delay_between_pulses_step'] mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc_pi_pulse, platform.settings['settings']['pi_pulse_duration']))) mc.setpoints(np.arange(delay_between_pulses_start, 
delay_between_pulses_end, delay_between_pulses_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo', soft_avg = software_averages) platform.stop() # Fitting return dataset # Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout def run_spin_echo_3pulses(self): platform = self.platform platform.reload_settings() mc = self.mc ps = platform.settings['settings'] start = 0 frequency = ps['pi_pulse_frequency'] amplitude = ps['pi_pulse_amplitude'] duration = ps['pi_pulse_duration'] phase = 0 shape = eval(ps['pi_pulse_shape']) qc_pi_half_pulse_1 = Pulse(start, duration, amplitude/2, frequency, phase, shape) qc_pi_pulse = Pulse(qc_pi_half_pulse_1.start + qc_pi_half_pulse_1.duration, duration, amplitude, frequency, phase, shape) qc_pi_half_pulse_2 = Pulse(qc_pi_pulse.start + qc_pi_pulse.duration, duration, amplitude/2, frequency, phase, shape) ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1]) ro_pulse_settings = ps['readout_pulse'] ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape) sequence = PulseSequence() sequence.add(qc_pi_half_pulse_1) sequence.add(qc_pi_pulse) sequence.add(qc_pi_half_pulse_2) sequence.add(ro_pulse) ds = self.load_settings() self.pl.tuids_max_num(ds['max_num_plots']) software_averages = ds['software_averages'] ds = ds['spin_echo_3pulses'] delay_between_pulses_start = ds['delay_between_pulses_start'] delay_between_pulses_end = ds['delay_between_pulses_end'] delay_between_pulses_step = ds['delay_between_pulses_step'] mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc_pi_pulse, qc_pi_half_pulse_2, platform.settings['settings']['pi_pulse_duration'])) mc.setpoints(np.arange(delay_between_pulses_start, delay_between_pulses_end, delay_between_pulses_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo 3 Pulses', soft_avg = software_averages) platform.stop() return dataset def run_shifted_resonator_spectroscopy(self, platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) # Fast Sweep platform.software_averages = 1 scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Fast", soft_avg=platform.software_averages) platform.stop() shifted_LO_frequency = dataset['x0'].values[dataset['y0'].argmax().values] # Precision Sweep platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + shifted_LO_frequency) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency shifted_max_ro_voltage = smooth_dataset.max() * 1e6 print('\n') print(f"\nResonator Frequency = {shifted_frequency}") print(f"Maximum Voltage Measured = 
{shifted_max_ro_voltage} μV") return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset def callibrate_qubit_states(self, platform, sequence, niter, nshots, resonator_freq, qubit_freq, ro_pulse, qc_pulse=None): import math platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) if (qc_pulse != None): platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.start() if (qc_pulse == None): platform.LO_qcm.off() all_states = [] for i in range(niter): qubit_state = platform.execute(sequence, nshots) #Compose complex point from i, q obtained from execution point = complex(qubit_state[2], qubit_state[3]) all_states.add(point) platform.stop() return all_states, np.mean(all_states) def classify(point: complex, mean_gnd, mean_exc): import math """Classify the given state as |0> or |1>.""" def distance(a, b): return math.sqrt((np.real(a) - np.real(b))**2 + (np.imag(a) - np.imag(b))**2) return int(distance(point, mean_exc) < distance(point, mean_gnd)) # help classes class QCPulseLengthParameter(): label = 'Qubit Control Pulse Length' unit = 'ns' name = 'qc_pulse_length' def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.duration = value self.ro_pulse.start = value + 4 class QCPulseGainParameter(): label = 'Qubit Control Gain' unit = '%' name = 'qc_pulse_gain' def __init__(self, qcm): self.qcm = qcm def set(self,value): self.qcm.gain = value / 100 class QCPulseAmplitudeParameter(): label = 'Qubit Control Pulse Amplitude' unit = '%' name = 'qc_pulse_amplitude' def __init__(self, qc_pulse): self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.amplitude = value / 100 class T1WaitParameter(): label = 'Time' unit = 'ns' name = 't1_wait' initial_value = 0 def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.base_duration = qc_pulse.duration def set(self, value): # TODO: implement following condition #must be >= 4ns <= 65535 #platform.delay_before_readout = value self.ro_pulse.start = self.base_duration + 4 + value class RamseyWaitParameter(): label = 'Time' unit = 'ns' name = 'ramsey_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length + value self.ro_pulse.start = self.pi_pulse_length * 2 + value + 4 class SpinEchoWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length + value self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4 class SpinEcho3PWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.qc3_pulse = qc3_pulse self.pi_pulse_length = pi_pulse_length def set(self,value): self.qc2_pulse.start = self.pi_pulse_length + value self.qc3_pulse.start = 2 * self.pi_pulse_length + 2 * value self.ro_pulse.start = 3 * self.pi_pulse_length + 2 * value + 4 class QRPulseGainParameter(): label = 'Qubit Readout Gain' unit = '%' name = 'ro_pulse_gain' def __init__(self, qrm): self.qrm = qrm def set(self,value): self.qrm.gain = value / 100 class ROController(): # Quantify Gettable Interface 
Implementation label = ['Amplitude', 'Phase','I','Q'] unit = ['V', 'Radians','V','V'] name = ['A', 'Phi','I','Q'] def __init__(self, platform, sequence): self.platform = platform self.sequence = sequence def get(self): return self.platform.execute(self.sequence) examples/qili_single_qubit/main.py METASEP import yaml import time from diagnostics import run_resonator_spectroscopy, \ run_qubit_spectroscopy, \ run_rabi_pulse_length, \ run_rabi_pulse_gain, \ run_rabi_pulse_length_and_gain, \ run_rabi_pulse_length_and_amplitude, \ run_t1, \ run_ramsey, \ run_spin_echo if __name__ == "__main__": with open("settings.yaml", "r") as file: settings = yaml.safe_load(file) resonator_freq = 7798070000.0 qubit_freq = 8726500000.0 pi_pulse_length = 45 pi_pulse_gain = 0.14 pi_pulse_amplitude = 0.9 print("\nRun resonator spectroscopy.\n") resonator_freq, _ = run_resonator_spectroscopy(**settings["resonator_spectroscopy"]) print("\nRun qubit spectroscopy.\n") qubit_freq, _ = run_qubit_spectroscopy(resonator_freq, **settings["qubit_spectroscopy"]) print("\nRun Rabi pulse length.\n") run_rabi_pulse_length(resonator_freq, qubit_freq) print("\nRun Rabi pulse gain.\n") run_rabi_pulse_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and gain.\n") run_rabi_pulse_length_and_gain(resonator_freq, qubit_freq) print("\nRun Rabi pulse length and amplitude.\n") run_rabi_pulse_length_and_amplitude(resonator_freq, qubit_freq) print("\nRun t1.\n") run_t1(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, **settings["t1"]) print("\nRun ramsey.\n") run_ramsey(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["ramsey"]) print("\nRun Spin Echo.\n") run_spin_echo(resonator_freq, qubit_freq, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, **settings["spin_echo"]) print("\nDiagnostics completed.\n") time.sleep(360) examples/qili_single_qubit/fitting.py METASEP import pathlib import numpy as np from scipy.optimize import curve_fit import matplotlib.pyplot as plt import os from quantify_core.analysis.base_analysis import BaseAnalysis from quantify_core.data.handling import set_datadir import lmfit import numpy as np def lorentzian_fit(label, peak): #label = directory where hdf5 data file generated by MC is located. #label=last --> Read most recent hdf5 #label=/path/to/directory/ --> read the hdf5 data file contained in "label" voltage, x_axis, data, d = data_post(label) frequency = x_axis #Create a lmfit model for fitting equation defined in resonator_peak model_Q = lmfit.Model(resonator_peak) #Guess parameters for Lorentzian max or min #to guess center if peak == max: guess_center = frequency[np.argmax(voltage)] #Argmax = Returns the indices of the maximum values along an axis. else: guess_center = frequency[np.argmin(voltage)] #Argmin = Returns the indices of the minimum values along an axis. 
#to guess the sigma if peak == max: voltage_min_i = np.argmin(voltage) frequency_voltage_min = frequency[voltage_min_i] guess_sigma = abs(frequency_voltage_min - guess_center) #500KHz*1e-9 else: guess_sigma = 5e-03 #500KHz*1e-9 #to guess the amplitude if peak == max: voltage_max = np.max(voltage) guess_amp = voltage_max*guess_sigma*np.pi else: voltage_min = np.min(voltage) guess_amp = -voltage_min*guess_sigma*np.pi #to guess the offset if peak == max: guess_offset = 0 else: guess_offset = voltage[0]*-2.5*1e5 #Add guessed parameters to the model if peak == max: model_Q.set_param_hint('center',value=guess_center,vary=True) else: model_Q.set_param_hint('center',value=guess_center,vary=False) model_Q.set_param_hint('sigma',value=guess_sigma, vary=True) model_Q.set_param_hint('amplitude',value=guess_amp, vary=True) model_Q.set_param_hint('offset',value=guess_offset, vary=True) guess_parameters = model_Q.make_params() guess_parameters #fit the model with the data and guessed parameters fit_res = model_Q.fit(data=voltage,frequency=frequency,params=guess_parameters) #print(fit_res.fit_report()) #fit_res.best_values #get the values for postprocessing and for legend. f0 = fit_res.best_values['center']/1e9 BW = (fit_res.best_values['sigma']*2)/1e9 Q = abs(f0/BW) #plot the fitted curve dummy_frequencies = np.linspace(np.amin(frequency),np.amax(frequency),101) fit_fine = resonator_peak(dummy_frequencies,**fit_res.best_values) fig,ax = plt.subplots(1,1,figsize=(8,3)) ax.plot(data.x0,data.y0*1e3,'o',label='Data') ax.plot(dummy_frequencies,fit_fine*1e3,'r-', label=r"Fit $f_0$ ={:.4f} GHz" "\n" " $Q$ ={:.0f}".format(f0,Q)) ax.set_ylabel('Integrated Voltage (mV)') ax.set_xlabel('Frequency (GHz)') ax.legend() plt.show() fig.savefig(pathlib.Path("data") / 'Resonator_Spectroscopy.pdf',format='pdf') #fit_res.plot_fit(show_init=True) return f0, BW, Q def rabi_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(rabi, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = rabi(dataset['x0'].values, *popt) pi_pulse_duration = np.abs((1.0 / popt[2]) / 2) rabi_oscillations_pi_pulse_min_voltage = smooth_dataset.min() * 1e6 t1 = 1.0 / popt[4] #double check T1 return smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 def t1_fit(dataset): pguess = [ max(dataset['y0'].values), (max(dataset['y0'].values) - min(dataset['y0'].values)), 1/250 ] popt, pcov = curve_fit(exp, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = exp(dataset['x0'].values, *popt) t1 = abs(1/popt[2]) return smooth_dataset, t1 def ramsey_fit(dataset): pguess = [ np.mean(dataset['y0'].values), np.max(dataset['y0'].values) - np.min(dataset['y0'].values), 0.5/dataset['x0'].values[np.argmin(dataset['y0'].values)], np.pi/2, 0.1e-6 ] popt, pcov = curve_fit(ramsey, dataset['x0'].values, dataset['y0'].values, p0=pguess) smooth_dataset = ramsey(dataset['x0'].values, *popt) delta_frequency = popt[2] t2 = 1.0 / popt[4] return smooth_dataset, delta_frequency, t2 def resonator_peak(frequency,amplitude,center,sigma,offset): #http://openafox.com/science/peak-function-derivations.html return (amplitude/np.pi) * (sigma/((frequency-center)**2 + sigma**2) + offset) def rabi(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # Period T : 1/p[2] # Phase : p[3] # Arbitrary 
parameter T_2 : 1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def ramsey(x, p0, p1, p2, p3, p4): # A fit to Superconducting Qubit Rabi Oscillation # Offset : p[0] # Oscillation amplitude : p[1] # DeltaFreq : p[2] # Phase : p[3] # Arbitrary parameter T_2 : 1/p[4] #return p[0] + p[1] * np.sin(2 * np.pi / p[2] * x + p[3]) * np.exp(-x / p[4]) return p0 + p1 * np.sin(2 * np.pi * x * p2 + p3) * np.exp(- x * p4) def exp(x,*p) : return p[0] - p[1]*np.exp(-1 * x * p[2]) #Read last hdf5 file generated by the mc or specify the directory def data_post(dir = "last"): if dir == "last": #get last measured file directory = 'data/quantify' directory = max([subdir for subdir, dirs, files in os.walk(directory)], key=os.path.getmtime) label = os.path.basename(os.path.normpath(directory)) else: label = dir set_datadir('data/quantify') d = BaseAnalysis(tuid=label) d.run() data = d.dataset # #clean the array arr1 = data.y0; voltage = [None] * len(arr1); for i in range(0, len(arr1)): voltage[i] = float(arr1[i]); arr1 = data.x0; x_axis = [None] * len(arr1); for i in range(0, len(arr1)): x_axis[i] = float(arr1[i]); plt.plot(x_axis,voltage) #plt.show() return voltage, x_axis, data, d examples/qili_single_qubit/diagnostics.py METASEP import pathlib import numpy as np import matplotlib.pyplot as plt import yaml # TODO: Have a look in the documentation of ``MeasurementControl`` from quantify_core.measurement import MeasurementControl from quantify_core.measurement.control import Gettable, Settable from quantify_core.data.handling import set_datadir from scipy.signal import savgol_filter # TODO: Check why this set_datadir is needed set_datadir(pathlib.Path(__file__).parent / "data" / "quantify") def backup_config_file(): import os import shutil import errno from datetime import datetime original = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'qili.yml')) now = datetime.now() now = now.strftime("%d%m%Y%H%M%S") destination_file_name = "qili_" + now + ".yml" target = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data/settings_backups', destination_file_name)) try: print("Copying file: " + original) print("Destination file" + target) shutil.copyfile(original, target) print("Platform settings backup done") except IOError as e: # ENOENT(2): file does not exist, raised also on missing dest parent dir if e.errno != errno.ENOENT: raise # try creating parent directories os.makedirs(os.path.dirname(target)) shutil.copy(original, target) def get_config_parameter(dictID, dictID1, key): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'qili.yml')) with open(calibration_path) as file: settings = yaml.safe_load(file) file.close() if (not dictID1): return settings[dictID][key] else: return settings[dictID][dictID1][key] def save_config_parameter(dictID, dictID1, key, value): import os calibration_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', 'src', 'qibolab', 'runcards', 'qili.yml')) with open(calibration_path, "r") as file: settings = yaml.safe_load(file) file.close() if (not dictID1): settings[dictID][key] = value print("Saved value: " + str(settings[dictID][key])) else: settings[dictID][dictID1][key] = value print("Saved value: " + str(settings[dictID][dictID1][key])) with open(calibration_path, "w") as file: settings = yaml.dump(settings, file, 
sort_keys=False, indent=4) file.close() def plot(smooth_dataset, dataset, label, type): if (type == 0): #cavity plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmax()], smooth_dataset[smooth_dataset.argmax()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return if (type == 1): #qubit spec, rabi, ramsey, t1 plots fig, ax = plt.subplots(1, 1, figsize=(15, 15/2/1.61)) ax.plot(dataset['x0'].values, dataset['y0'].values,'-',color='C0') ax.plot(dataset['x0'].values, smooth_dataset,'-',color='C1') ax.title.set_text(label) ax.plot(dataset['x0'].values[smooth_dataset.argmin()], smooth_dataset[smooth_dataset.argmin()], 'o', color='C2') plt.savefig(pathlib.Path("data") / f"{label}.pdf") return def create_measurement_control(name): import os if os.environ.get("ENABLE_PLOTMON", False): mc = MeasurementControl(f'MC {name}') from quantify_core.visualization.pyqt_plotmon import PlotMonitor_pyqt plotmon = PlotMonitor_pyqt(f'Plot Monitor {name}') plotmon.tuids_max_num(3) mc.instr_plotmon(plotmon.name) from quantify_core.visualization.instrument_monitor import InstrumentMonitor insmon = InstrumentMonitor(f"Instruments Monitor {name}") mc.instrument_monitor(insmon.name) return mc, plotmon, insmon else: mc = MeasurementControl(f'MC {name}') return mc, None, None class ROController(): # Quantify Gettable Interface Implementation label = ['Amplitude', 'Phase','I','Q'] unit = ['V', 'Radians','V','V'] name = ['A', 'Phi','I','Q'] def __init__(self, platform, sequence): self.platform = platform self.sequence = sequence def get(self): return self.platform.execute(self.sequence) def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step): #[. . . . . .][...................]0[...................][. . . . . .] #[-------- lowres_width ---------][-- highres_width --] [-- highres_width --][-------- lowres_width ---------] #>. 
.< lowres_step # >..< highres_step # ^ centre value = 0 scanrange = np.concatenate( ( np.arange(-lowres_width,-highres_width,lowres_step), np.arange(-highres_width,highres_width,highres_step), np.arange(highres_width,lowres_width,lowres_step) ) ) return scanrange def run_resonator_spectroscopy(platform, mc, sequence, ro_pulse, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step): #Fast Sweep platform.software_averages = 1 scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=platform.software_averages) platform.stop() platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values]) avg_min_voltage = np.mean(dataset['y0'].values[:25]) * 1e6 # Precision Sweep platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency max_ro_voltage = smooth_dataset.max() * 1e6 print(f"\nResonator Frequency = {resonator_freq}") return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset def run_punchout(platform, mc, sequence, ro_pulse, precision_width, precision_step): platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) scanrange = scanrange + platform.LO_qrm.get_frequency() mc.settables([Settable(platform.LO_qrm.device.frequency), Settable(QRPulseGainParameter(platform.qrm))]) setpoints_gain = np.arange(10, 100, 10) mc.setpoints_grid([scanrange, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() platform.LO_qcm.off() dataset = mc.run("Punchout", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) #FIXME: Code Lorentzian fitting for cavity spec and punchout resonator_freq = dataset['x0'].values[dataset['y0'].argmax().values]+ro_pulse.frequency print(f"\nResonator Frequency = {resonator_freq}") print(f"\nResonator LO Frequency = {resonator_freq - ro_pulse.frequency}") return resonator_freq, smooth_dataset, dataset def run_qubit_spectroscopy(platform, mc, resonator_freq, sequence, qc_pulse, ro_pulse, fast_start, fast_end, fast_step, precision_start, precision_end, precision_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.software_averages = 1 # Fast Sweep fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step) mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=platform.software_averages) platform.stop() # Precision Sweep platform.software_averages = 1 precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step) 
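    # Descriptive note (added comment): this precision sweep re-scans a narrow window
    # around the qubit-drive LO frequency found by the fast sweep; the trace below is
    # smoothed with a Savitzky-Golay filter and the qubit frequency is taken at the
    # minimum of the readout voltage.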
mc.settables(platform.LO_qcm.device.frequency) mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2) qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency min_ro_voltage = smooth_dataset.min() * 1e6 return qubit_freq, min_ro_voltage, smooth_dataset, dataset def run_rabi_pulse_length(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq - qc_pulse.frequency) platform.software_averages = 3 mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse))) mc.setpoints(np.arange(1, 400, 1)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length', soft_avg = platform.software_averages) platform.stop() return dataset, platform.qcm.gain def run_rabi_pulse_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): #qubit pulse duration=200 platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 3 mc.settables(Settable(QCPulseGainParameter(platform.qcm))) mc.setpoints(np.arange(0, 100, 10)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Gain', soft_avg = platform.software_averages) platform.stop() return dataset def run_rabi_pulse_length_and_gain(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseGainParameter(platform.qcm))]) setpoints_length = np.arange(1, 400, 10) setpoints_gain = np.arange(0, 20, 1) mc.setpoints_grid([setpoints_length, setpoints_gain]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset def run_rabi_pulse_length_and_amplitude(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.software_averages = 1 mc.settables([Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)), Settable(QCPulseAmplitudeParameter(qc_pulse))]) setpoints_length = np.arange(1, 1000, 2) setpoints_amplitude = np.arange(0, 100, 2) mc.setpoints_grid([setpoints_length, setpoints_amplitude]) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Rabi Pulse Length and Gain', soft_avg = platform.software_averages) # Analyse data to look for the smallest qc_pulse length that renders off-resonance amplitude, determine corresponding pi_pulse gain # platform.pi_pulse_length = # platform.pi_pulse_gain = platform.stop() return dataset def run_t1(platform, mc,resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, 
pi_pulse_gain, pi_pulse_duration, delay_before_readout_start, delay_before_readout_end, delay_before_readout_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pulse))) mc.setpoints(np.arange(delay_before_readout_start, delay_before_readout_end, delay_before_readout_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('T1', soft_avg = platform.software_averages) platform.stop() return dataset def run_ramsey(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, pi_pulse_gain, pi_pulse_duration, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(RamseyWaitParameter(ro_pulse, qc2_pulse, pi_pulse_duration))) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Ramsey', soft_avg = platform.software_averages) platform.stop() return dataset def run_spin_echo(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, ro_pulse, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(Settable(SpinEchoWaitParameter(ro_pulse, qc2_pulse, pi_pulse_length))) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo', soft_avg = platform.software_averages) platform.stop() return dataset # Spin Echo 3 Pulses: RX(pi/2) - wait t(rotates z) - RX(pi) - wait t(rotates z) - RX(pi/2) - readout def run_spin_echo_3pulses(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, qc2_pulse, qc3_pulse, ro_pulse, pi_pulse_gain, pi_pulse_length, pi_pulse_amplitude, start_start, start_end, start_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) platform.qcm.gain = pi_pulse_gain mc.settables(SpinEcho3PWaitParameter(ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length)) mc.setpoints(np.arange(start_start, start_end, start_step)) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run('Spin Echo 3 Pulses', soft_avg = platform.software_averages) platform.stop() return dataset def run_shifted_resonator_spectroscopy(platform, mc, resonator_freq, qubit_freq, sequence, qc_pulse, ro_pulse, lowres_width, lowres_step, highres_width, highres_step, precision_width, precision_step): platform.LO_qrm.set_frequency(resonator_freq - ro_pulse.frequency) platform.LO_qcm.set_frequency(qubit_freq + qc_pulse.frequency) # Fast Sweep platform.software_averages = 1 scanrange = variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + platform.LO_qrm.get_frequency()) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Fast", soft_avg=platform.software_averages) platform.stop() shifted_LO_frequency = 
dataset['x0'].values[dataset['y0'].argmax().values] # Precision Sweep platform.software_averages = 1 scanrange = np.arange(-precision_width, precision_width, precision_step) mc.settables(platform.LO_qrm.device.frequency) mc.setpoints(scanrange + shifted_LO_frequency) mc.gettables(Gettable(ROController(platform, sequence))) platform.start() dataset = mc.run("Resonator Spectroscopy Shifted Precision", soft_avg=platform.software_averages) platform.stop() smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2) shifted_frequency = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency shifted_max_ro_voltage = smooth_dataset.max() * 1e6 print('\n') print(f"\nResonator Frequency = {shifted_frequency}") print(f"Maximum Voltage Measured = {shifted_max_ro_voltage} μV") return shifted_frequency, shifted_max_ro_voltage, smooth_dataset, dataset # help classes class QCPulseLengthParameter(): label = 'Qubit Control Pulse Length' unit = 'ns' name = 'qc_pulse_length' def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.duration = value self.ro_pulse.start = value + 4 class QCPulseGainParameter(): label = 'Qubit Control Gain' unit = '%' name = 'qc_pulse_gain' def __init__(self, qcm): self.qcm = qcm def set(self,value): self.qcm.gain = value / 100 class QCPulseAmplitudeParameter(): label = 'Qubit Control Pulse Amplitude' unit = '%' name = 'qc_pulse_amplitude' def __init__(self, qc_pulse): self.qc_pulse = qc_pulse def set(self, value): self.qc_pulse.amplitude = value / 100 class T1WaitParameter(): label = 'Time' unit = 'ns' name = 't1_wait' initial_value = 0 def __init__(self, ro_pulse, qc_pulse): self.ro_pulse = ro_pulse self.base_duration = qc_pulse.duration def set(self, value): # TODO: implement following condition #must be >= 4ns <= 65535 #platform.delay_before_readout = value self.ro_pulse.start = self.base_duration + 4 + value class RamseyWaitParameter(): label = 'Time' unit = 'ns' name = 'ramsey_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length // 2 + value self.ro_pulse.start = self.pi_pulse_length + value + 4 class SpinEchoWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.pi_pulse_length = pi_pulse_length def set(self, value): self.qc2_pulse.start = self.pi_pulse_length//2 + value self.ro_pulse.start = 3 * self.pi_pulse_length//2 + 2 * value + 4 class SpinEcho3PWaitParameter(): label = 'Time' unit = 'ns' name = 'spin_echo_wait' initial_value = 0 def __init__(self, ro_pulse, qc2_pulse, qc3_pulse, pi_pulse_length): self.ro_pulse = ro_pulse self.qc2_pulse = qc2_pulse self.qc3_pulse = qc3_pulse self.pi_pulse_length = pi_pulse_length def set(self,value): self.qc2_pulse.start = self.pi_pulse_length//2 + value self.qc3_pulse.start = (3 * self.pi_pulse_length)//2 + 2 * value self.ro_pulse.start = 2 * self.pi_pulse_length + 2 * value + 4 class QRPulseGainParameter(): label = 'Qubit Readout Gain' unit = '%' name = 'ro_pulse_gain' def __init__(self, qrm): self.qrm = qrm def set(self,value): self.qrm.gain = value / 100 doc/source/conf.py METASEP # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. 
For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('.')) import qibolab # -- Project information ----------------------------------------------------- project = 'qibolab' copyright = '2021, The Qibo team' author = 'The Qibo team' release = qibolab.__version__ # -- General configuration --------------------------------------------------- # https://stackoverflow.com/questions/56336234/build-fail-sphinx-error-contents-rst-not-found # master_doc = "index" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'recommonmark', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = [] src/qibolab/tests/regressions/__init__.py METASEP setup.py METASEP # Installation script for python from setuptools import setup, find_packages import os import re PACKAGE = "qibolab" # Returns the version def get_version(): """ Gets the version from the package's __init__ file if there is some problem, let it happily fail """ VERSIONFILE = os.path.join("src", PACKAGE, "__init__.py") initfile_lines = open(VERSIONFILE, "rt").readlines() VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]" for line in initfile_lines: mo = re.search(VSRE, line, re.M) if mo: return mo.group(1) # load long description from README this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f: long_description = f.read() setup( name=PACKAGE, version=get_version(), description="Quantum hardware backend for Qibo", author="The Qibo team", author_email="", url="https://github.com/qiboteam/qibolab", packages=find_packages("src"), package_dir={"": "src"}, package_data={"": ["*.json", "*.npy"]}, zip_safe=False, classifiers=[ "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Physics", ], install_requires=[ "qibo", "visa", "pyvisa-py", "qcodes", ], extras_require={ "docs": [ "sphinx", "sphinx_rtd_theme", "recommonmark", "sphinxcontrib-bibtex", "sphinx_markdown_tables", "nbsphinx", "IPython"], # TII system dependencies "tiiq": [ "qblox-instruments==0.5.4", "qcodes==0.29.1", "lmfit", "quantify-core==0.5.1", "pyVISA==1.11.3", "pyVISA-py==0.5.2", ] }, python_requires=">=3.6.0", long_description=long_description, long_description_content_type='text/markdown', ) src/qibolab/tests/test_tomography.py METASEP import os import json import pathlib import pytest import numpy as np from qibolab.tomography import Tomography REGRESSION_FOLDER = pathlib.Path(__file__).with_name("regressions") def assert_regression_fixture(array, filename): """Check array matches data inside filename. Args: array: numpy array filename: fixture filename If filename does not exists, this function creates the missing file otherwise it loads from file and compare. 
""" filename = REGRESSION_FOLDER/filename try: target = np.load(filename) np.testing.assert_allclose(array, target) except: # pragma: no cover # case not tested in GitHub workflows because files exist np.save(filename, array) # def test_cholesky_init(): # m = np.random.random((5, 5)) # c = Cholesky.from_matrix(m) # np.testing.assert_allclose(c.matrix, m) # v = np.random.random((5,)) # c = Cholesky.from_vector(v) # np.testing.assert_allclose(c.vector, v) # with pytest.raises(ValueError): # c = Cholesky(matrix=m, vector=v) # with pytest.raises(TypeError): # c = Cholesky(matrix="test") # with pytest.raises(TypeError): # c = Cholesky(vector="test") # def test_cholesky_decompose(): # m = np.array([[1, 2, 3, 4, 5], # [2, 3, 4, 5, 6], # [3, 4, 5, 6, 7], # [4, 5, 6, 7, 8], # [5, 6, 7, 8, 9]]) # m = m + m.T # m = m + 5 * np.eye(5, dtype=m.dtype) # c = Cholesky.decompose(m) # target_matrix = np.array([[1, 0, 0, 0, 0], # [0, 2, 0, 0, 0], # [0, 0, 7, 0, 0], # [1, 2, 2, 4, 0], # [0, 0, 0, 0, 0]]) # target_vector = np.array([1, 2, 7, 4, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # np.testing.assert_allclose(c.matrix, target_matrix) # np.testing.assert_allclose(c.vector, target_vector) # def test_cholesky_reconstruct(): # v = np.arange(16) # c = Cholesky.from_vector(v) # target_matrix = np.array([ # [0.38709677+0.j, 0.32580645-0.01774194j, 0.21612903-0.02741935j, 0.01693548-0.03145161j], # [0.32580645+0.01774194j, 0.35564516+0.j, 0.23709677-0.02419355j, 0.01935484-0.03387097j], # [0.21612903+0.02741935j, 0.23709677+0.02419355j, 0.25+0.j, 0.02177419-0.03629032j], # [0.01693548+0.03145161j, 0.01935484+0.03387097j, 0.02177419+0.03629032j, 0.00725806+0.j]]) # np.testing.assert_allclose(c.reconstruct(), target_matrix, atol=1e-7) # def test_tomography_find_beta(): # amplitudes = np.random.random(16) # state = np.array([1, 2, 3, 4]) # tom = Tomography(amplitudes, state) # target_beta = [2.5, -1, -0.5, 0] # np.testing.assert_allclose(tom.find_beta(state), target_beta) @pytest.mark.skip def test_tomography_init(): n = 3 states = np.random.random((4**n, n)) gates = np.random.random((4**n, 2**n, 2**n)) tom = Tomography(states, gates) np.testing.assert_allclose(tom.states, states) np.testing.assert_allclose(tom.gates, gates) def test_tomography_default_gates(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) assert_regression_fixture(tom.gates, "default_gates.npy") def test_tomography_linear(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) assert_regression_fixture(tom.linear, "linear_estimation.npy") @pytest.mark.skip def test_tomography_fit(): n = 3 states = np.random.random((4**n, n)) tom = Tomography(states) with pytest.raises(ValueError): tom.fit tom.minimize() assert tom.success assert_regression_fixture(tom.fit, "mlefit_estimation.npy") def extract_json(filepath): with open(filepath, "r") as file: raw = json.loads(file.read()) data = np.stack(list(raw.values())) return np.sqrt((data ** 2).sum(axis=1)) @pytest.mark.skip @pytest.mark.parametrize("state_value,target_fidelity", [(0, 93.01278047175582), (1, 82.30795926024483), (2, 65.06114271984393), (3, 22.230579223385284)]) def test_tomography_example(state_value, target_fidelity): state_path = REGRESSION_FOLDER / "states_181120.json" amplitude_path = "tomo_181120-{0:02b}.json".format(state_value) amplitude_path = REGRESSION_FOLDER / amplitude_path state = extract_json(state_path) amp = extract_json(amplitude_path) tom = Tomography(amp, state) tom.minimize() assert tom.success rho_theory = 
np.zeros((4, 4), dtype=complex) rho_theory[state_value, state_value] = 1 fidelity = tom.fidelity(rho_theory) np.testing.assert_allclose(fidelity, target_fidelity) src/qibolab/tests/test_pulses.py METASEP import pytest import numpy as np from qibolab import pulses from qibolab.pulse_shapes import Rectangular, Gaussian, Drag, SWIPHT from qibolab.circuit import PulseSequence def test_basic_pulse(): basic = pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, "Rectangular") target_repr = "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, Rectangular)" assert repr(basic) == target_repr def test_multifrequency_pulse(): members = [ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, "Rectangular", channel=0), pulses.Pulse(0.5, 5.0, 0.7, 100, 0.5, "Gaussian", channel=1), pulses.Pulse(1.0, 3.5, 0.4, 70.0, 0.7, "Rectangular", channel=2) ] multi = pulses.MultifrequencyPulse(members) target_repr = "M(P(0, 0.5, 1.5, 0.8, 40.0, 0.7, Rectangular), "\ "P(1, 0.5, 5.0, 0.7, 100, 0.5, Gaussian), "\ "P(2, 1.0, 3.5, 0.4, 70.0, 0.7, Rectangular))" assert repr(multi) == target_repr def test_file_pulse(): filep = pulses.FilePulse(0, 1.0, "testfile") target_repr = "F(0, 1.0, testfile)" assert repr(filep) == target_repr def test_rectangular_shape(): rect = Rectangular() assert rect.name == "rectangular" assert rect.envelope(1.0, 0.2, 2.2, 4.5) == 4.5 def test_gaussian_shape(): gauss = Gaussian(1.5) assert gauss.name == "gaussian" assert gauss.sigma == 1.5 assert repr(gauss) == "(gaussian, 1.5)" target_envelop = 4.4108940298803985 time = np.array([1.0]) assert gauss.envelope(time, 0.2, 2.2, 4.5) == target_envelop def test_drag_shape(): drag = Drag(1.5, 2.5) assert drag.name == "drag" assert drag.sigma == 1.5 assert drag.beta == 2.5 assert repr(drag) == "(drag, 1.5, 2.5)" target_envelop = 4.4108940298803985 + 1.470298009960133j time = np.array([1.0]) assert drag.envelope(time, 0.2, 2.2, 4.5) == target_envelop def test_swipht_shape(): swipht = SWIPHT(2.2) assert swipht.name == "SWIPHT" assert swipht.g == 2.2 assert repr(swipht) == "(SWIPHT, 2.2)" target_envelop = 4.4108940298803985 time = np.array([1.0]) assert swipht.envelope(time, 0.2, 2.2, 4.5) == 4.5 # TODO: Fix these tests so that waveform is not zero @pytest.mark.skip("Pulse compile was changed after implementing TIIq.") def test_basic_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size)) basic = pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)) waveform = basic.compile(waveform, seq) target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) @pytest.mark.skip("Pulse compile was changed after implementing TIIq.") def test_multifrequency_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size), dtype="complex128") members = [ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ] multi = pulses.MultifrequencyPulse(members) waveform = multi.compile(waveform, seq) target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) @pytest.mark.skip("Skipping this test because `sequence.file_dir` is not available") def test_file_pulse_compile(): seq = PulseSequence([]) waveform = np.zeros((seq.nchannels, seq.sample_size)) filep = pulses.FilePulse(0, 1.0, "file") waveform = filep.compile(waveform, seq) target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) src/qibolab/tests/test_experiments.py METASEP import pytest import qibolab import pyvisa def 
test_experiment_getter_setter(): assert qibolab.get_experiment() == "icarusq" with pytest.raises(KeyError): qibolab.set_experiment("test") qibolab.set_experiment("icarusq") @pytest.mark.xfail(raises=pyvisa.errors.VisaIOError) def test_icarusq_awg_setter(): assert qibolab.get_experiment() == "icarusq" qibolab.set_experiment("awg") qibolab.set_experiment("icarusq") src/qibolab/tests/test_circuit.py METASEP import pytest import numpy as np import qibo from qibo import gates, models from qibolab import pulses from qibolab.pulse_shapes import Gaussian, Drag from qibolab.circuit import PulseSequence, HardwareCircuit # TODO: Parametrize these tests using experiment @pytest.mark.skip def test_pulse_sequence_init(): seq = PulseSequence([]) assert seq.pulses == [] assert seq.duration == 1.391304347826087e-05 assert seq.sample_size == 32000 seq = PulseSequence([], duration=2e-6) assert seq.pulses == [] assert seq.duration == 2e-6 assert seq.sample_size == 4600 @pytest.mark.skip("Skipping this test because `seq.file_dir` is not available") def test_pulse_sequence_compile(): seq = PulseSequence([ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.FilePulse(0, 1.0, "file"), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ]) waveform = seq.compile() target_waveform = np.zeros_like(waveform) np.testing.assert_allclose(waveform, target_waveform) def test_pulse_sequence_serialize(): seq = PulseSequence([ pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Gaussian(1.0)), pulses.FilePulse(0, 1.0, "file"), pulses.Pulse(0.5, 1.5, 0.8, 40.00, 0.7, Drag(1.0, 1.5)) ]) target_repr = "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, (gaussian, 1.0)), "\ "F(0, 1.0, file), "\ "P(0, 0.5, 1.5, 0.8, 40.0, 0.7, (drag, 1.0, 1.5))" assert seq.serialize() == target_repr def test_hardwarecircuit_errors(): qibo.set_backend("qibolab") c = models.Circuit(5) with pytest.raises(NotImplementedError): c._add_layer() with pytest.raises(NotImplementedError): c.fuse() @pytest.mark.skip def test_hardwarecircuit_sequence_duration(): from qibolab import experiment qibo.set_backend("qibolab") c = models.Circuit(2) c.add(gates.RX(0, theta=0.123)) c.add(gates.RY(0, theta=0.123)) c.add(gates.H(0)) c.add(gates.Align(0)) c.add(gates.M(0)) c.qubit_config = experiment.static.initial_calibration qubit_times = c._calculate_sequence_duration(c.queue) # pylint: disable=E1101 target_qubit_times = [3.911038e-08, 0] np.testing.assert_allclose(qubit_times, target_qubit_times) @pytest.mark.skip def test_hardwarecircuit_create_pulse_sequence(): from qibolab import experiment qibo.set_backend("qibolab") c = models.Circuit(2) c.add(gates.RX(0, theta=0.123)) c.add(gates.RY(0, theta=0.123)) c.add(gates.H(0)) c.add(gates.Align(0)) c.add(gates.M(0)) c.qubit_config = experiment.static.initial_calibration c.qubit_config[0]["gates"]["measure"] = [] qubit_times = np.zeros(c.nqubits) - c._calculate_sequence_duration(c.queue) # pylint: disable=E1101 qubit_phases = np.zeros(c.nqubits) pulse_sequence = c.create_pulse_sequence(c.queue, qubit_times, qubit_phases) # pylint: disable=E1101 target_pulse_sequence = "P(3, -1.940378868990046e-09, 9.70189434495023e-10, 0.375, 747382500.0, 0.0, (rectangular)), "\ "P(3, -9.70189434495023e-10, 9.70189434495023e-10, 0.375, 747382500.0, 90.0, (rectangular))" pulse_sequence.serialize() == target_pulse_sequence src/qibolab/tests/__init__.py METASEP src/qibolab/platforms/qbloxplatform.py METASEP from qibo.config import raise_error, log from qibolab.platforms.abstract import AbstractPlatform class QBloxPlatform(AbstractPlatform): """Platform for 
controlling quantum devices using QCM and QRM. Example: .. code-block:: python from qibolab import Platform platform = Platform("tiiq") """ def __init__(self, name, runcard): self._qrm = None self._qcm = None self._LO_qrm = None self._LO_qcm = None super().__init__(name, runcard) self.last_qcm_pulses = None self.last_qrm_pulses = None @property def qrm(self): """Reference to :class:`qibolab.instruments.qblox.PulsarQRM` instrument.""" self._check_connected() return self._qrm @property def qcm(self): """Reference to :class:`qibolab.instruments.qblox.PulsarQCM` instrument.""" self._check_connected() return self._qcm @property def LO_qrm(self): """Reference to QRM local oscillator (:class:`qibolab.instruments.rohde_schwarz.SGS100A`).""" self._check_connected() return self._LO_qrm @property def LO_qcm(self): """Reference to QCM local oscillator (:class:`qibolab.instruments.rohde_schwarz.SGS100A`).""" self._check_connected() return self._LO_qcm def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" if not self.is_connected: log.info(f"Connecting to {self.name} instruments.") try: from qibolab.instruments import PulsarQRM, PulsarQCM, SGS100A self._qrm = PulsarQRM( **self._settings.get("QRM_init_settings")) self._qcm = PulsarQCM( **self._settings.get("QCM_init_settings")) self._LO_qrm = SGS100A( **self._settings.get("LO_QRM_init_settings")) self._LO_qcm = SGS100A( **self._settings.get("LO_QCM_init_settings")) self.is_connected = True except Exception as exception: raise_error(RuntimeError, "Cannot establish connection to " f"{self.name} instruments. " f"Error captured: '{exception}'") def setup(self): """Configures instruments using the loaded calibration settings.""" if self.is_connected: self._qrm.setup(**self._settings.get("QRM_settings")) self._qcm.setup(**self._settings.get("QCM_settings")) self._LO_qrm.setup(**self._settings.get("LO_QRM_settings")) self._LO_qcm.setup(**self._settings.get("LO_QCM_settings")) def start(self): """Turns on the local oscillators. The QBlox insturments are turned on automatically during execution after the required pulse sequences are loaded. """ self._LO_qcm.on() self._LO_qrm.on() def stop(self): """Turns off all the lab instruments.""" self.LO_qrm.off() self.LO_qcm.off() self.qrm.stop() self.qcm.stop() def disconnect(self): """Disconnects from the lab instruments.""" if self.is_connected: self._LO_qrm.close() self._LO_qcm.close() self._qrm.close() self._qcm.close() self.is_connected = False def execute(self, sequence, nshots=None): """Executes a pulse sequence. Pulses are being cached so that are not reuploaded if they are the same as the ones sent previously. This greatly accelerates some characterization routines that recurrently use the same set of pulses, i.e. qubit and resonator spectroscopy, spin echo, and future circuits based on fixed gates. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. Returns: Readout results acquired by :class:`qibolab.instruments.qblox.PulsarQRM` after execution. 
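        Note: calling the platform object directly, e.g. ``platform(sequence, nshots)``,
        forwards to this method via
        :meth:`qibolab.platforms.abstract.AbstractPlatform.__call__`.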
""" if not self.is_connected: raise_error( RuntimeError, "Execution failed because instruments are not connected.") if nshots is None: nshots = self.hardware_avg # Translate and upload instructions to instruments if sequence.qcm_pulses: if self.last_qcm_pulses != [pulse.serial() for pulse in sequence.qcm_pulses]: waveforms, program = self._qcm.translate(sequence, self.delay_before_readout, nshots) self._qcm.upload(waveforms, program, self.data_folder) if sequence.qrm_pulses: if self.last_qrm_pulses != [pulse.serial() for pulse in sequence.qrm_pulses]: waveforms, program = self._qrm.translate(sequence, self.delay_before_readout, nshots) self._qrm.upload(waveforms, program, self.data_folder) # Execute instructions if sequence.qcm_pulses: self._qcm.play_sequence() if sequence.qrm_pulses: # TODO: Find a better way to pass the readout pulse here acquisition_results = self._qrm.play_sequence_and_acquire( sequence.qrm_pulses[0]) else: acquisition_results = None self.last_qcm_pulses = [pulse.serial() for pulse in sequence.qcm_pulses] self.last_qrm_pulses = [pulse.serial() for pulse in sequence.qrm_pulses] return acquisition_results src/qibolab/platforms/icplatform.py METASEP import copy from qibo.config import raise_error, log from qibolab.platforms.abstract import AbstractPlatform class Qubit: """Describes a single qubit in pulse control and readout extraction. Args: pi_pulse (dict): Qubit pi-pulse parameters. See qibolab.pulses.Pulse for more information. readout_pulse (dict): Qubit readout pulse parameters. See qibolab.pulses.ReadoutPulse for more information. resonator_spectroscopy_max_ro_voltage (float): Readout voltage corresponding to the ground state of the qubit. rabi_oscillations_pi_pulse_min_voltage (float): Readout voltage corresponding to the excited state of the qubit. playback (str): Instrument name for playing the qubit XY control pulses. playback_readout (str): Instrument name for playing the qubit readout pulse. readout_frequency (float): Readout frequency for IQ demodulation. readout (str): Instrument name for reading the qubit. readout_channels (int, int[]): Channels on the instrument associated to qubit readout. """ def __init__(self, pi_pulse, readout_pulse, readout_frequency, resonator_spectroscopy_max_ro_voltage, rabi_oscillations_pi_pulse_min_voltage, playback, playback_readout, readout, readout_channels): self.pi_pulse = pi_pulse self.readout_pulse = readout_pulse self.readout_frequency = readout_frequency self.max_readout_voltage = resonator_spectroscopy_max_ro_voltage self.min_readout_voltage = rabi_oscillations_pi_pulse_min_voltage self.playback = playback self.playback_readout = playback_readout self.readout = readout self.readout_channels = readout_channels class ICPlatform(AbstractPlatform): """Platform for controlling quantum devices with IC. Example: .. 
code-block:: python from qibolab import Platform platform = Platform("icarusq") """ def __init__(self, name, runcard): self._instruments = [] self._lo = [] self._adc = [] self._last_sequence = None super().__init__(name, runcard) self.qubits = [] qubits = self._settings.get("qubits") for qubit_dict in qubits.values(): self.qubits.append(Qubit(**qubit_dict)) def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" if not self.is_connected: log.info(f"Connecting to {self.name} instruments.") try: import qibolab.instruments as qi instruments = self._settings.get("instruments") for params in instruments.values(): inst = getattr(qi, params.get("type"))(**params.get("init_settings")) self._instruments.append(inst) # Use yaml config to track instrument type if params.get("lo"): self._lo.append(inst) if params.get("adc"): self._adc.append(inst) self.is_connected = True except Exception as exception: raise_error(RuntimeError, "Cannot establish connection to " f"{self.name} instruments. " f"Error captured: '{exception}'") def setup(self): """Configures instruments using the loaded calibration settings.""" if self.is_connected: instruments = self._settings.get("instruments") for inst in self._instruments: inst.setup(**instruments.get(inst.name).get("settings")) def start(self): """Turns on the local oscillators. At this point, the pulse sequence have not been uploaded to the DACs, so they will not be started yet. """ for lo in self._lo: lo.start() def stop(self): """Turns off all the lab instruments.""" for inst in self._instruments: inst.stop() def disconnect(self): """Disconnects from the lab instruments.""" if self.is_connected: for inst in self._instruments: inst.close() self._instruments = [] self._lo = [] self._adc = [] self.is_connected = False def execute(self, sequence, nshots=None): """Executes a pulse sequence. Pulses are being cached so that are not reuploaded if they are the same as the ones sent previously. This greatly accelerates some characterization routines that recurrently use the same set of pulses, i.e. qubit and resonator spectroscopy, spin echo, and future circuits based on fixed gates. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. Returns: Readout results acquired by the assigned readout instrument after execution. """ if not self.is_connected: raise_error( RuntimeError, "Execution failed because instruments are not connected.") if nshots is None: nshots = self.hardware_avg from qibolab.pulses import ReadoutPulse qubits_to_measure = [] measurement_results = [] pulse_mapping = {} seq_serial = {} for pulse in sequence.pulses: # Assign pulses to each respective waveform generator qubit = self.fetch_qubit(pulse.qubit) playback_device = qubit.playback # Track each qubit to measure if isinstance(pulse, ReadoutPulse): qubits_to_measure.append(pulse.qubit) playback_device = qubit.playback_readout if playback_device not in pulse_mapping.keys(): pulse_mapping[playback_device] = [] seq_serial[playback_device] = [] # Map the pulse to the associated playback instrument. 
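            # (added comment) The serialized form is stored alongside the Pulse object so
            # that identical sequences can be detected later and re-uploading to the
            # instrument can be skipped.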
pulse_mapping[playback_device].append(pulse) seq_serial[playback_device].append(pulse.serial) # Translate and upload the pulse subsequence for each device if needed for device, subsequence in pulse_mapping.items(): inst = self.fetch_instrument(device) if self._last_sequence is None or seq_serial[device] != self._last_sequence[device]: inst.upload(inst.translate(subsequence, nshots)) inst.play_sequence() self._last_sequence = seq_serial for adc in self._adc: adc.arm(nshots) # Start the experiment sequence self.start_experiment() # Fetch the experiment results for qubit_id in set(qubits_to_measure): qubit = self.fetch_qubit(qubit_id) inst = self.fetch_instrument(qubit.readout) measurement_results.append(inst.result(qubit.readout_frequency, qubit.readout_channels)) if len(qubits_to_measure) == 1: return measurement_results[0] return measurement_results def fetch_instrument(self, name): """Returns a reference to an instrument. """ try: res = next(inst for inst in self._instruments if inst.name == name) return res except StopIteration: raise_error(Exception, "Instrument not found") def fetch_qubit(self, qubit_id=0) -> Qubit: """Fetches the qubit based on the id. """ return self.qubits[qubit_id] def start_experiment(self): """Starts the instrument to start the experiment sequence. """ inst = self.fetch_instrument(self._settings.get("settings").get("experiment_start_instrument")) inst.start_experiment() def fetch_qubit_pi_pulse(self, qubit_id=0) -> dict: """Fetches the qubit pi-pulse. """ # Use copy to avoid mutability return copy.copy(self.fetch_qubit(qubit_id).pi_pulse) def fetch_qubit_readout_pulse(self, qubit_id=0) -> dict: """Fetches the qubit readout pulse. """ # Use copy to avoid mutability return copy.copy(self.fetch_qubit(qubit_id).readout_pulse) src/qibolab/platforms/abstract.py METASEP from abc import ABC, abstractmethod import yaml from qibo.config import raise_error, log class AbstractPlatform(ABC): """Abstract platform for controlling quantum devices. Args: name (str): name of the platform. runcard (str): path to the yaml file containing the platform setup. 
""" def __init__(self, name, runcard): log.info(f"Loading platform {name}") log.info(f"Loading runcard {runcard}") self.name = name self.runcard = runcard # Load calibration settings with open(runcard, "r") as file: self._settings = yaml.safe_load(file) # Define references to instruments self.is_connected = False def _check_connected(self): if not self.is_connected: raise_error(RuntimeError, "Cannot access instrument because it is not connected.") def reload_settings(self): with open(self.runcard, "r") as file: self._settings = yaml.safe_load(file) self.setup() @property def settings(self): return self._settings @property def data_folder(self): return self._settings.get("settings").get("data_folder") @property def hardware_avg(self): return self._settings.get("settings").get("hardware_avg") @property def sampling_rate(self): return self._settings.get("settings").get("sampling_rate") @property def software_averages(self): return self._settings.get("settings").get("software_averages") @software_averages.setter def software_averages(self, x): self._settings["settings"]["software_averages"] = x @property def repetition_duration(self): return self._settings.get("settings").get("repetition_duration") @property def resonator_frequency(self): return self._settings.get("settings").get("resonator_freq") @property def qubit_frequency(self): return self._settings.get("settings").get("qubit_freq") @property def pi_pulse_gain(self): return self._settings.get("settings").get("pi_pulse_gain") @property def pi_pulse_amplitude(self): return self._settings.get("settings").get("pi_pulse_amplitude") @property def pi_pulse_duration(self): return self._settings.get("settings").get("pi_pulse_duration") @property def pi_pulse_frequency(self): return self._settings.get("settings").get("pi_pulse_frequency") @property def readout_pulse(self): return self._settings.get("settings").get("readout_pulse") @property def max_readout_voltage(self): return self._settings.get("settings").get("resonator_spectroscopy_max_ro_voltage") @property def min_readout_voltage(self): return self._settings.get("settings").get("rabi_oscillations_pi_pulse_min_voltage") @property def delay_between_pulses(self): return self._settings.get("settings").get("delay_between_pulses") @property def delay_before_readout(self): return self._settings.get("settings").get("delay_before_readout") def run_calibration(self, runcard): """Executes calibration routines and updates the settings json.""" # TODO: Implement calibration routines and update ``self._settings``. # update instruments with new calibration settings self.setup() # save new calibration settings to json with open(runcard, "w") as file: yaml.dump(self._settings, file) def __call__(self, sequence, nshots=None): return self.execute(sequence, nshots) @abstractmethod def connect(self): """Connects to lab instruments using the details specified in the calibration settings.""" raise_error(NotImplementedError) @abstractmethod def setup(self): """Configures instruments using the loaded calibration settings.""" raise_error(NotImplementedError) @abstractmethod def start(self): """Turns on the local oscillators.""" raise_error(NotImplementedError) @abstractmethod def stop(self): """Turns off all the lab instruments.""" raise_error(NotImplementedError) @abstractmethod def disconnect(self): """Disconnects from the lab instruments.""" raise_error(NotImplementedError) @abstractmethod def execute(self, sequence, nshots=None): """Executes a pulse sequence. 
Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence to execute. nshots (int): Number of shots to sample from the experiment. If ``None`` the default value provided as hardware_avg in the calibration json will be used. Returns: Readout results acquired by after execution. """ raise_error(NotImplementedError) src/qibolab/platforms/__init__.py METASEP src/qibolab/instruments/rohde_schwarz.py METASEP """ Class to interface with the local oscillator RohdeSchwarz SGS100A """ import logging from qibolab.instruments.instrument import Instrument, InstrumentException logger = logging.getLogger(__name__) # TODO: Consider using a global logger class SGS100A(Instrument): def __init__(self, label, ip): """ create Local Oscillator with name = label and connect to it in local IP = ip Params format example: "ip": '192.168.0.8', "label": "qcm_LO" """ super().__init__(ip) self.device = None self._power = None self._frequency = None self._connected = False self._signature = f"{type(self).__name__}@{ip}" self.label = label self.connect() def connect(self): import qcodes.instrument_drivers.rohde_schwarz.SGS100A as LO_SGS100A try: self.device = LO_SGS100A.RohdeSchwarz_SGS100A(self.label, f"TCPIP0::{self.ip}::inst0::INSTR") except Exception as exc: raise InstrumentException(self, str(exc)) self._connected = True logger.info("Local oscillator connected") def setup(self, power, frequency): self.set_power(power) self.set_frequency(frequency) def set_power(self, power): """Set dbm power to local oscillator.""" self._power = power self.device.power(power) logger.info(f"Local oscillator power set to {power}.") def set_frequency(self, frequency): self._frequency = frequency self.device.frequency(frequency) logger.info(f"Local oscillator frequency set to {frequency}.") def get_power(self): if self._power is not None: return self._power raise RuntimeError("Local oscillator power was not set.") def get_frequency(self): if self._frequency is not None: return self._frequency raise RuntimeError("Local oscillator frequency was not set.") def on(self): """Start generating microwaves.""" self.device.on() logger.info("Local oscillator on.") def off(self): """Stop generating microwaves.""" self.device.off() logger.info("Local oscillator off.") def close(self): if self._connected: self.off() self.device.close() self._connected = False # TODO: Figure out how to fix this #def __del__(self): # self.close() src/qibolab/instruments/qblox.py METASEP from abc import abstractmethod import json import numpy as np from abc import ABC, abstractmethod from qibo.config import raise_error from qibolab.instruments.instrument import Instrument, InstrumentException import logging logger = logging.getLogger(__name__) # TODO: Consider using a global logger class GenericPulsar(Instrument, ABC): def __init__(self, label, ip, sequencer, ref_clock, sync_en, is_cluster): super().__init__(ip) self.label = label # TODO When updating to the new firmware, use a sequencer mapping instead of setting a single sequencer self.sequencer = sequencer self.ref_clock = ref_clock self.sync_en = sync_en self.is_cluster = is_cluster self._connected = False self.Device = None self.device = None # To be defined in each instrument self.name = None # To be defined during setup self.hardware_avg = None self.initial_delay = None self.repetition_duration = None # hardcoded values used in ``generate_program`` self.delay_before_readout = 4 # same value is used for all readout pulses (?) 
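        # (added comment) Step, in ns, used by ``generate_program`` to split the long
        # repetition-duration wait into a loop of shorter waits.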
self.wait_loop_step = 1000 self.duration_base = 16380 # maximum length of a waveform in number of samples (defined by the device memory). # hardcoded values used in ``upload`` # TODO QCM shouldn't have acquisitions self.acquisitions = {"single": {"num_bins": 1, "index":0}} self.weights = {} def connect(self): """Connects to the instruments.""" if not self._connected: try: self.device = self.Device(self.label, self.ip) except Exception as exc: raise InstrumentException(self, str(exc)) self._connected = True else: raise RuntimeError @property def gain(self): return self._gain @gain.setter def gain(self, gain): self._gain = gain if self.sequencer == 1: self.device.sequencer1_gain_awg_path0(gain) self.device.sequencer1_gain_awg_path1(gain) else: self.device.sequencer0_gain_awg_path0(gain) self.device.sequencer0_gain_awg_path1(gain) def setup(self, gain, initial_delay, repetition_duration): """Sets calibration setting to QBlox instruments. Args: gain (float): initial_delay (float): repetition_duration (float): """ self.gain = gain self.initial_delay = initial_delay self.repetition_duration = repetition_duration def _translate_single_pulse(self, pulse): """Translates a single pulse to the instrument waveform format. Helper method for :meth:`qibolab.instruments.qblox.GenericPulsar.generate_waveforms`. Args: pulse (:class:`qibolab.pulses.Pulse`): Pulse object to translate. Returns: Dictionary containing the waveform corresponding to the pulse. """ # Use the envelope to modulate a sinusoldal signal of frequency freq_if envelope_i = pulse.compile() # TODO: if ``envelope_q`` is not always 0 we need to find how to # calculate it envelope_q = np.zeros(int(pulse.duration)) time = np.arange(pulse.duration) * 1e-9 # FIXME: There should be a simpler way to construct this array cosalpha = np.cos(2 * np.pi * pulse.frequency * time + pulse.phase) sinalpha = np.sin(2 * np.pi * pulse.frequency * time + pulse.phase) mod_matrix = np.array([[cosalpha,sinalpha], [-sinalpha,cosalpha]]) result = [] for it, t, ii, qq in zip(np.arange(pulse.duration), time, envelope_i, envelope_q): result.append(mod_matrix[:, :, it] @ np.array([ii, qq])) mod_signals = np.array(result) # add offsets to compensate mixer leakage waveform = { "modI": {"data": mod_signals[:, 0] + pulse.offset_i, "index": 0}, "modQ": {"data": mod_signals[:, 1] + pulse.offset_q, "index": 1} } return waveform def generate_waveforms(self, pulses): """Translates a list of pulses to the instrument waveform format. Args: pulses (list): List of :class:`qibolab.pulses.Pulse` objects. Returns: Dictionary containing waveforms corresponding to all pulses. """ if not pulses: raise_error(NotImplementedError, "Cannot translate empty pulse sequence.") name = self.name combined_length = max(pulse.start + pulse.duration for pulse in pulses) waveforms = { f"modI_{name}": {"data": np.zeros(combined_length), "index": 0}, f"modQ_{name}": {"data": np.zeros(combined_length), "index": 1} } for pulse in pulses: waveform = self._translate_single_pulse(pulse) i0, i1 = pulse.start, pulse.start + pulse.duration waveforms[f"modI_{name}"]["data"][i0:i1] += waveform["modI"]["data"] waveforms[f"modQ_{name}"]["data"][i0:i1] += waveform["modQ"]["data"] #Fixing 0s addded to the qrm waveform. 
Needs to be improved, but working well on TIIq for pulse in pulses: if(pulse.channel == "qrm"): waveforms[f"modI_{name}"]["data"] = waveforms[f"modI_{name}"]["data"][pulse.start:] waveforms[f"modQ_{name}"]["data"] = waveforms[f"modQ_{name}"]["data"][pulse.start:] return waveforms def generate_program(self, hardware_avg, initial_delay, delay_before_readout, acquire_instruction, wait_time): """Generates the program to be uploaded to instruments.""" extra_duration = self.repetition_duration - self.duration_base extra_wait = extra_duration % self.wait_loop_step num_wait_loops = (extra_duration - extra_wait) // self. wait_loop_step # This calculation was moved to `PulsarQCM` and `PulsarQRM` #if ro_pulse is not None: # acquire_instruction = "acquire 0,0,4" # wait_time = self.duration_base - initial_delay - delay_before_readout - 4 #else: # acquire_instruction = "" # wait_time = self.duration_base - initial_delay - delay_before_readout if initial_delay != 0: initial_wait_instruction = f"wait {initial_delay}" else: initial_wait_instruction = "" program = f""" move {hardware_avg},R0 nop wait_sync 4 # Synchronize sequencers over multiple instruments loop: {initial_wait_instruction} play 0,1,{delay_before_readout} {acquire_instruction} wait {wait_time} move {num_wait_loops},R1 nop repeatloop: wait {self.wait_loop_step} loop R1,@repeatloop wait {extra_wait} loop R0,@loop stop """ return program @abstractmethod def translate(self, sequence, nshots): """Translates an abstract pulse sequence to QBlox format. Args: sequence (:class:`qibolab.pulses.PulseSequence`): Pulse sequence. Returns: The waveforms (dict) and program (str) required to execute the pulse sequence on QBlox instruments. """ raise_error(NotImplementedError) def upload(self, waveforms, program, data_folder): """Uploads waveforms and programs to QBlox sequencer to prepare execution.""" import os # Upload waveforms and program # Reformat waveforms to lists for name, waveform in waveforms.items(): if isinstance(waveform["data"], np.ndarray): waveforms[name]["data"] = waveforms[name]["data"].tolist() # JSON only supports lists # Add sequence program and waveforms to single dictionary and write to JSON file filename = f"{data_folder}/{self.name}_sequence.json" program_dict = { "waveforms": waveforms, "weights": self.weights, "acquisitions": self.acquisitions, "program": program } if not os.path.exists(data_folder): os.makedirs(data_folder) with open(filename, "w", encoding="utf-8") as file: json.dump(program_dict, file, indent=4) # Upload json file to the device if self.sequencer == 1: self.device.sequencer1_waveforms_and_program(os.path.join(os.getcwd(), filename)) else: self.device.sequencer0_waveforms_and_program(os.path.join(os.getcwd(), filename)) def play_sequence(self): """Executes the uploaded instructions.""" # arm sequencer and start playing sequence self.device.arm_sequencer() self.device.start_sequencer() def stop(self): """Stops the QBlox sequencer from sending pulses.""" self.device.stop_sequencer() def close(self): """Disconnects from the instrument.""" if self._connected: self.stop() self.device.close() self._connected = False # TODO: Figure out how to fix this #def __del__(self): # self.close() class PulsarQRM(GenericPulsar): """Class for interfacing with Pulsar QRM.""" def __init__(self, label, ip, ref_clock="external", sequencer=0, sync_en=True, hardware_avg_en=True, acq_trigger_mode="sequencer", is_cluster=True): super().__init__(label, ip, sequencer, ref_clock, sync_en, is_cluster) # Instantiate base object from qblox 
library and connect to it self.name = "qrm" if self.is_cluster: from cluster.cluster import cluster_qrm self.Device = cluster_qrm else: from pulsar_qrm.pulsar_qrm import pulsar_qrm self.Device = pulsar_qrm self.connect() self.sequencer = sequencer self.hardware_avg_en = hardware_avg_en # Reset and configure self.device.reset() self.device.reference_source(ref_clock) self.device.scope_acq_sequencer_select(sequencer) self.device.scope_acq_avg_mode_en_path0(hardware_avg_en) self.device.scope_acq_avg_mode_en_path1(hardware_avg_en) self.device.scope_acq_trigger_mode_path0(acq_trigger_mode) self.device.scope_acq_trigger_mode_path1(acq_trigger_mode) # sync sequencer if self.sequencer == 1: self.device.sequencer1_sync_en(sync_en) else: self.device.sequencer0_sync_en(sync_en) def setup(self, gain, initial_delay, repetition_duration, start_sample, integration_length, sampling_rate, mode): super().setup(gain, initial_delay, repetition_duration) self.start_sample = start_sample self.integration_length = integration_length self.sampling_rate = sampling_rate self.mode = mode def translate(self, sequence, delay_before_readout, nshots): # Allocate only readout pulses to PulsarQRM waveforms = self.generate_waveforms(sequence.qrm_pulses) # Generate program without acquire instruction initial_delay = sequence.qrm_pulses[0].start # Acquire waveforms over remaining duration of acquisition of input vector of length = 16380 with integration weights 0,0 acquire_instruction = "acquire 0,0,4" wait_time = self.duration_base - initial_delay - delay_before_readout - 4 # FIXME: Not sure why this hardcoded 4 is needed program = self.generate_program(nshots, initial_delay, delay_before_readout, acquire_instruction, wait_time) return waveforms, program def play_sequence_and_acquire(self, ro_pulse): """Executes the uploaded instructions and retrieves the readout results. Args: ro_pulse (:class:`qibolab.pulses.Pulse`): Readout pulse to use for retrieving the results. """ #arm sequencer and start playing sequence super().play_sequence() #start acquisition of data #Wait for the sequencer to stop with a timeout period of one minute. self.device.get_sequencer_state(0, 1) #Wait for the acquisition to finish with a timeout period of one second. self.device.get_acquisition_state(self.sequencer, 1) #Move acquisition data from temporary memory to acquisition list. self.device.store_scope_acquisition(self.sequencer, "single") #Get acquisition list from instrument. single_acq = self.device.get_acquisitions(self.sequencer) i, q = self._demodulate_and_integrate(single_acq, ro_pulse) acquisition_results = np.sqrt(i**2 + q**2), np.arctan2(q, i), i, q return acquisition_results def _demodulate_and_integrate(self, single_acq, ro_pulse): #DOWN Conversion norm_factor = 1. 
/ (self.integration_length) n0 = self.start_sample n1 = self.start_sample + self.integration_length input_vec_I = np.array(single_acq["single"]["acquisition"]["scope"]["path0"]["data"][n0: n1]) input_vec_Q = np.array(single_acq["single"]["acquisition"]["scope"]["path1"]["data"][n0: n1]) input_vec_I -= np.mean(input_vec_I) input_vec_Q -= np.mean(input_vec_Q) if self.mode == 'ssb': modulated_i = input_vec_I modulated_q = input_vec_Q time = np.arange(modulated_i.shape[0])*1e-9 cosalpha = np.cos(2 * np.pi * ro_pulse.frequency * time) sinalpha = np.sin(2 * np.pi * ro_pulse.frequency * time) demod_matrix = 2 * np.array([[cosalpha, -sinalpha], [sinalpha, cosalpha]]) result = [] for it, t, ii, qq in zip(np.arange(modulated_i.shape[0]), time,modulated_i, modulated_q): result.append(demod_matrix[:,:,it] @ np.array([ii, qq])) demodulated_signal = np.array(result) integrated_signal = norm_factor*np.sum(demodulated_signal,axis=0) elif self.mode == 'optimal': raise_error(NotImplementedError, "Optimal Demodulation Mode not coded yet.") else: raise_error(NotImplementedError, "Demodulation mode not understood.") return integrated_signal class PulsarQCM(GenericPulsar): def __init__(self, label, ip, sequencer=0, ref_clock="external", sync_en=True, is_cluster=True): super().__init__(label, ip, sequencer, ref_clock, sync_en, is_cluster) # Instantiate base object from qblox library and connect to it self.name = "qcm" if self.is_cluster: from cluster.cluster import cluster_qcm self.Device = cluster_qcm else: from pulsar_qcm.pulsar_qcm import pulsar_qcm self.Device = pulsar_qcm self.connect() self.sequencer = sequencer # Reset and configure self.device.reset() self.device.reference_source(ref_clock) if self.sequencer == 1: self.device.sequencer1_sync_en(sync_en) else: self.device.sequencer0_sync_en(sync_en) def translate(self, sequence, delay_before_read_out, nshots=None): # Allocate only qubit pulses to PulsarQRM waveforms = self.generate_waveforms(sequence.qcm_pulses) # Generate program without acquire instruction initial_delay = sequence.qcm_pulses[0].start acquire_instruction = "" wait_time = self.duration_base - initial_delay - delay_before_read_out program = self.generate_program(nshots, initial_delay, delay_before_read_out, acquire_instruction, wait_time) return waveforms, program src/qibolab/instruments/instrument.py METASEP from abc import ABC, abstractmethod class Instrument(ABC): """ Parent class for all the instruments connected via TCPIP. """ def __init__(self, ip): self._connected = False self.ip = ip self._signature = f"{type(self).__name__}@{ip}" self.device = None @abstractmethod def connect(self): """ Establish connection with the instrument. Initialize self.device variable """ raise NotImplementedError @property def signature(self): return self._signature @abstractmethod def close(self): """ Close connection with the instrument. Set instrument values to idle values if required. 
""" raise NotImplementedError class InstrumentException(Exception): def __init__(self, instrument: Instrument, message: str): self.instrument = instrument header = f"InstrumentException with {self.instrument.signature}" full_msg = header + ": " + message super().__init__(full_msg) self.instrument = instrument src/qibolab/instruments/icarusq.py METASEP import pyvisa as visa import numpy as np from typing import List, Optional, Union from qcodes.instrument_drivers.AlazarTech import ATS # Frequency signal generation mode MODE_NYQUIST = 0 MODE_MIXER = 1 # Waveform functions def square(t, start, duration, frequency, amplitude, phase): x = amplitude * (1 * (start < t) & 1 * (start+duration > t)) i = x * np.cos(2 * np.pi * frequency * t + phase[0]) q = - x * np.sin(2 * np.pi * frequency * t + phase[1]) return i, q def TTL(t, start, duration, amplitude): x = amplitude * (1 * (start < t) & 1 * (start + duration > t)) return x def sine(t, start, duration, frequency, amplitude, phase): x = amplitude * (1 * (start < t) & 1 * (start+duration > t)) wfm = x * np.sin(2 * np.pi * frequency * t + phase) return wfm class Instrument: """Abstract class for instrument methods. """ def connect(self): pass def start(self): pass def stop(self): pass def close(self): pass class VisaInstrument: """Instrument class that uses the VISA I/O standard. Implementation based on qcodes drivers. """ def __init__(self) -> None: self._visa_handle = None def connect(self, address: str, timeout: int = 10000) -> None: """Connects to the instrument. """ rm = visa.ResourceManager() self._visa_handle = rm.open_resource(address, timeout=timeout) def write(self, msg: Union[bytes, str]) -> None: """Writes a message to the instrument. """ self._visa_handle.write(msg) def query(self, msg: Union[bytes, str]) -> str: """Writes a message to the instrument and read the response. """ return self._visa_handle.query(msg) def read(self) -> str: """Waits for and reads the response from the instrument. """ return self._visa_handle.read() def close(self) -> None: """Closes the instrument connection. """ self._visa_handle.close() def ready(self) -> None: """ Blocking command """ self.query("*OPC?") class TektronixAWG5204(VisaInstrument): """Driver for the Tektronix AWG5204 instrument. """ def __init__(self, name, address): VisaInstrument.__init__(self) self.connect(address) self.name = name self._nchannels = 7 self._sampling_rate = None self._mode = None self._amplitude = [0.75, 0.75, 0.75, 0.75] self._sequence_delay = None self._pulse_buffer = None self._adc_delay = None self._qb_delay = None self._ro_delay = None self._ip = None self._channel_phase = None def setup(self, offset: List[Union[int, float]], amplitude: Optional[List[Union[int, float]]] = [0.75, 0.75, 0.75, 0.75], resolution: Optional[int] = 14, sampling_rate: Optional[Union[int, float]] = 2.5e9, mode: int = MODE_MIXER, sequence_delay: float = 60e-6, pulse_buffer: float = 1e-6, adc_delay: float = 282e-9, qb_delay: float = 292e-9, ro_delay: float = 266e-9, ip: str = "192.168.0.2", channel_phase: List[float] = [-0.10821, 0.00349066, 0.1850049, -0.0383972], **kwargs) -> None: """ Setup the instrument and assigns constants to be used for later. Arguments: offset (float[4]): List of aplitude offset per channel in volts. amplitude (float[4]): List of maximum peak-to-peak amplitude per channel in volts. resolution (float): Bit resolution of the AWG DACs. Normally this is assigned per channel but the driver requires all channels to have the same resolution. 
sampling_rate (float): Sampling rate of the AWG in S/s. mode (int): Nyquist or mixer frequency generation selection. sequence_delay (float): Time between each pulse sequence in seconds. pulse_buffer (float): Pad time before the start of the pulse sequence and after the end of the pulse sequence in seconds. adc_delay (float): Delay for the start of the ADC trigger signal in seconds. qb_delay (float): Delay for the start of the qubit switch TTL signal in seconds. ro_delay (float): Delay for the start of the readout switch TTL signal in seconds. ip (str): IP address for the device for waveform transfer. channel_phase (float[4]): Phase in radians for each channel. Used primarily on mixer mode to promote target sideband. """ # Reset the instrument and assign amplitude, offset and resolution per channel self.reset() for idx in range(4): ch = idx + 1 self.write("SOURCe{}:VOLTage {}".format(ch, amplitude[idx])) self._amplitude[idx] = amplitude[idx] self.write("SOURCE{}:VOLTAGE:LEVEL:IMMEDIATE:OFFSET {}".format(ch, offset[ch - 1])) self.write("SOURce{}:DAC:RESolution {}".format(ch, resolution)) # Set the DAC modes and sampling rate self.write("SOUR1:DMOD NRZ") self.write("SOUR2:DMOD NRZ") self.write("CLOCk:SRATe {}".format(sampling_rate)) if mode == MODE_NYQUIST: self.write("SOUR3:DMOD MIX") self.write("SOUR4:DMOD MIX") else: self.write("SOUR3:DMOD NRZ") self.write("SOUR4:DMOD NRZ") # Assigns constants to be used later self._mode = mode self._sampling_rate = sampling_rate self._pulse_buffer = pulse_buffer self._sequence_delay = sequence_delay self._qb_delay = qb_delay self._ro_delay = ro_delay self._adc_delay = adc_delay self._ip = ip self._channel_phase = channel_phase self.ready() def reset(self) -> None: """Reset the instrument back to AWG mode. """ self.write("INSTrument:MODE AWG") self.write("CLOC:SOUR EFIX") # Set AWG to external reference, 10 MHz self.write("CLOC:OUTP:STAT OFF") # Disable clock output self.clear() def clear(self) -> None: """Clear loaded waveform and sequences. """ self.write('SLISt:SEQuence:DELete ALL') self.write('WLISt:WAVeform:DELete ALL') self.ready() def translate(self, sequence, shots): """ Translates the pulse sequence into Tektronix .seqx file Arguments: sequence (qibolab.pulses.Pulse[]): Array containing pulses to be fired on this instrument. shots (int): Number of repetitions. 
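        Example:
            A minimal usage sketch (not taken from a tested setup): the VISA
            address, offsets and shot count are placeholders, and ``sequence``
            is a list of :class:`qibolab.pulses.Pulse` objects as described
            above.

            .. code-block:: python

                awg = TektronixAWG5204("awg", "TCPIP0::192.168.0.2::inst0::INSTR")
                awg.setup(offset=[0.0, 0.0, 0.0, 0.0])
                payload = awg.translate(sequence, shots=1000)
                awg.upload(payload)
                awg.play_sequence()
                awg.start_experiment()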
""" import broadbean as bb from qibolab.pulses import ReadoutPulse from qcodes.instrument_drivers.tektronix.AWG70000A import AWG70000A # First create np arrays for each channel start = min(pulse.start for pulse in sequence) end = max(pulse.start + pulse.duration for pulse in sequence) t = np.arange(start * 1e-9 - self._pulse_buffer, end * 1e-9 + self._pulse_buffer, 1 / self._sampling_rate) wfm = np.zeros((self._nchannels, len(t))) for pulse in sequence: # Convert pulse timings from nanoseconds to seconds start = pulse.start * 1e-9 duration = pulse.duration * 1e-9 if isinstance(pulse, ReadoutPulse): # Readout IQ Signal i_ch = pulse.channel[0] q_ch = pulse.channel[1] phase = (self._channel_phase[i_ch] + pulse.phase, self._channel_phase[q_ch] + pulse.phase) i_wfm, q_wfm = square(t, start, duration, pulse.frequency, pulse.amplitude, phase) wfm[i_ch] += i_wfm wfm[q_ch] += q_wfm # ADC TTL wfm[4] = TTL(t, start + self._adc_delay , 10e-9, 1) # RO SW TTL wfm[5] = TTL(t, start + self._ro_delay, duration, 1) # QB SW TTL wfm[6] = TTL(t, start + self._qb_delay, duration, 1) else: if self._mode == MODE_MIXER: # Qubit IQ signal i_ch = pulse.channel[0] q_ch = pulse.channel[1] phase = (self._channel_phase[i_ch] + pulse.phase, self._channel_phase[q_ch] + pulse.phase) i_wfm, q_wfm = square(t, start, duration, pulse.frequency, pulse.amplitude, phase) wfm[i_ch] += i_wfm wfm[q_ch] += q_wfm else: qb_wfm = sine(t, start, duration, pulse.frequency, pulse.amplitude, pulse.phase) wfm[pulse.channel] += qb_wfm # Add waveform arrays to broadbean sequencing main_sequence = bb.Sequence() main_sequence.name = "MainSeq" main_sequence.setSR(self._sampling_rate) # Dummy waveform on repeat to create delay between shots dummy = np.zeros(len(t)) unit_delay = 1e-6 sample_delay = np.zeros(int(unit_delay * self._sampling_rate)) delay_wfm = bb.Element() for ch in range(1, 5): delay_wfm.addArray(ch, sample_delay, self._sampling_rate, m1=sample_delay, m2=sample_delay) # Add pulses into waveform waveform = bb.Element() waveform.addArray(1, wfm[0], self._sampling_rate, m1=wfm[4], m2=wfm[5]) waveform.addArray(2, wfm[1], self._sampling_rate, m1=dummy, m2=wfm[6]) waveform.addArray(3, wfm[2], self._sampling_rate, m1=dummy, m2=dummy) waveform.addArray(4, wfm[3], self._sampling_rate, m1=dummy, m2=dummy) # Add subsequence to hold pulse waveforms and delay waveform subseq = bb.Sequence() subseq.name = "SubSeq" subseq.setSR(self._sampling_rate) subseq.addElement(1, waveform) subseq.addElement(2, delay_wfm) subseq.setSequencingNumberOfRepetitions(2, int(self._sequence_delay / unit_delay)) # Add sequence to play subsequence up to the number of shots. main_sequence.addSubSequence(1, subseq) main_sequence.setSequencingTriggerWait(1, 1) main_sequence.setSequencingNumberOfRepetitions(1, shots) main_sequence.setSequencingGoto(1, 1) # Compile waveform into payload # TODO: On fresh installation, fix bug in AWG70000A driver with regards to this method. 
payload = main_sequence.forge(apply_delays=False, apply_filters=False) payload = AWG70000A.make_SEQX_from_forged_sequence(payload, self._amplitude, "MainSeq") return payload def upload(self, payload): """ Uploads the .seqx file to the AWG and loads it """ import time with open("//{}/Users/OEM/Documents/MainSeq.seqx".format(self._ip), "wb+") as w: w.write(payload) pathstr = 'C:\\Users\\OEM\\Documents\\MainSeq.seqx' self.write('MMEMory:OPEN:SASSet:SEQuence "{}"'.format(pathstr)) start = time.time() while True: elapsed = time.time() - start if int(self.query("*OPC?")) == 1: break elif elapsed > self._visa_handle.timeout: raise RuntimeError("AWG took too long to load waveforms") for ch in range(1, 5): self.write('SOURCE{}:CASSet:SEQuence "MainSeq", {}'.format(ch, ch)) self.ready() def play_sequence(self): """ Arms the AWG for playback on trigger A """ for ch in range(1, 5): self.write("OUTPut{}:STATe 1".format(ch)) self.write('SOURce{}:RMODe TRIGgered'.format(ch)) self.write('SOURce1{}TINPut ATRIGGER'.format(ch)) # Arm the trigger self.write('AWGControl:RUN:IMMediate') self.ready() def stop(self): """ Stops the AWG and turns off all channels """ self.write('AWGControl:STOP') for ch in range(1, 5): self.write("OUTPut{}:STATe 0".format(ch)) def start_experiment(self): """ Triggers the AWG to start playing """ self.write('TRIGger:IMMediate ATRigger') class MCAttenuator(Instrument): """Driver for the MiniCircuit RCDAT-8000-30 variable attenuator. """ def __init__(self, name, address): self.name = name self._address = address def setup(self, attenuation: float): """Assigns the attenuation level on the attenuator. Arguments: attenuation(float): Attenuation setting in dB. Ranges from 0 to 35. """ import urllib3 http = urllib3.PoolManager() http.request('GET', 'http://{}/SETATT={}'.format(self._address, attenuation)) class QuicSyn(VisaInstrument): """Driver for the National Instrument QuicSyn Lite local oscillator. """ def __init__(self, name, address): VisaInstrument.__init__(self) self.name = name self.connect(address) self.write('0601') # EXT REF def setup(self, frequency): """ Sets the frequency in Hz """ self.write('FREQ {0:f}Hz'.format(frequency)) def start(self): """Starts the instrument. """ self.write('0F01') def stop(self): """Stops the instrument. """ self.write('0F00') class AlazarADC(ATS.AcquisitionController, Instrument): """Driver for the AlazarTech ATS9371 ADC. """ def __init__(self, name="alz_cont", address="Alazar1", **kwargs): from qibolab.instruments.ATS9371 import AlazarTech_ATS9371 self.adc = AlazarTech_ATS9371(address) self.acquisitionkwargs = {} self.samples_per_record = None self.records_per_buffer = None self.buffers_per_acquisition = None self.results = None self.number_of_channels = 2 self.buffer = None self._samples = None self._thread = None self._processed_data = None super().__init__(name, address, **kwargs) self.add_parameter("acquisition", get_cmd=self.do_acquisition) def setup(self, samples): """Setup the ADC. Arguments: samples (int): Number of samples to be acquired. TODO: Set trigger voltage as a variable. 
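        Example:
            A sketch of the acquisition flow; the sample count, shot number and
            readout frequency are illustrative placeholders (``samples`` must be
            a multiple of the board's 128-sample granularity).

            .. code-block:: python

                adc = AlazarADC()
                adc.setup(samples=4096)
                adc.arm(shots=1000)  # spawns the acquisition thread
                ampl, phase, i, q = adc.result(readout_frequency=100e6)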
""" trigger_volts = 1 input_range_volts = 2.5 trigger_level_code = int(128 + 127 * trigger_volts / input_range_volts) with self.adc.syncing(): self.adc.clock_source("EXTERNAL_CLOCK_10MHz_REF") #self.adc.clock_source("INTERNAL_CLOCK") self.adc.external_sample_rate(1_000_000_000) #self.adc.sample_rate(1_000_000_000) self.adc.clock_edge("CLOCK_EDGE_RISING") self.adc.decimation(1) self.adc.coupling1('DC') self.adc.coupling2('DC') self.adc.channel_range1(.02) #self.adc.channel_range2(.4) self.adc.channel_range2(.02) self.adc.impedance1(50) self.adc.impedance2(50) self.adc.bwlimit1("DISABLED") self.adc.bwlimit2("DISABLED") self.adc.trigger_operation('TRIG_ENGINE_OP_J') self.adc.trigger_engine1('TRIG_ENGINE_J') self.adc.trigger_source1('EXTERNAL') self.adc.trigger_slope1('TRIG_SLOPE_POSITIVE') self.adc.trigger_level1(trigger_level_code) self.adc.trigger_engine2('TRIG_ENGINE_K') self.adc.trigger_source2('DISABLE') self.adc.trigger_slope2('TRIG_SLOPE_POSITIVE') self.adc.trigger_level2(128) self.adc.external_trigger_coupling('DC') self.adc.external_trigger_range('ETR_2V5') self.adc.trigger_delay(0) #self.aux_io_mode('NONE') # AUX_IN_TRIGGER_ENABLE for seq mode on #self.aux_io_param('NONE') # TRIG_SLOPE_POSITIVE for seq mode on self.adc.timeout_ticks(0) self._samples = samples def update_acquisitionkwargs(self, **kwargs): """ This method must be used to update the kwargs used for the acquisition with the alazar_driver.acquire :param kwargs: :return: """ self.acquisitionkwargs.update(**kwargs) def arm(self, shots): """Arms the ADC for acqusition. Arguments: shots (int): Number of trigger signals to be expected. TODO: Wait for ADC to be ready for acquisition instead of fixed time duration. """ import threading import time self.update_acquisitionkwargs(mode='NPT', samples_per_record=self._samples, records_per_buffer=10, buffers_per_acquisition=int(shots / 10), allocated_buffers=100, buffer_timeout=10000) self.pre_start_capture() self._thread = threading.Thread(target=self.do_acquisition, args=()) self._thread.start() time.sleep(1) def pre_start_capture(self): self.samples_per_record = self.adc.samples_per_record.get() self.records_per_buffer = self.adc.records_per_buffer.get() self.buffers_per_acquisition = self.adc.buffers_per_acquisition.get() sample_speed = self.adc.get_sample_rate() t_final = self.samples_per_record / sample_speed self.time_array = np.arange(0, t_final, 1 / sample_speed) self.buffer = np.zeros(self.samples_per_record * self.records_per_buffer * self.number_of_channels) def pre_acquire(self): """ See AcquisitionController :return: """ # this could be used to start an Arbitrary Waveform Generator, etc... # using this method ensures that the contents are executed AFTER the # Alazar card starts listening for a trigger pulse pass def handle_buffer(self, data, buffer_number=None): """ See AcquisitionController :return: """ self.buffer += data def post_acquire(self): """ See AcquisitionController :return: """ def signal_to_volt(signal, voltdiv): u12 = signal / 16 #bitsPerSample = 12 codeZero = 2047.5 codeRange = codeZero return voltdiv * (u12 - codeZero) / codeRange records_per_acquisition = (1. 
* self.buffers_per_acquisition * self.records_per_buffer) recordA = np.zeros(self.samples_per_record) recordB = np.zeros(self.samples_per_record) # Interleaved samples for i in range(self.records_per_buffer): record_start = i * self.samples_per_record * 2 record_stop = record_start + self.samples_per_record * 2 record_slice = self.buffer[record_start:record_stop] recordA += record_slice[0::2] / records_per_acquisition recordB += record_slice[1::2] / records_per_acquisition recordA = signal_to_volt(recordA, 0.02) recordB = signal_to_volt(recordB, 0.02) self._processed_data = np.array([recordA, recordB]) return self.buffer, self.buffers_per_acquisition, self.records_per_buffer, self.samples_per_record, self.time_array def do_acquisition(self): """ this method performs an acquisition, which is the get_cmd for the acquisiion parameter of this instrument :return: """ self._get_alazar().acquire(acquisition_controller=self, **self.acquisitionkwargs) def result(self, readout_frequency, readout_channels=[0, 1]): """Returns the processed signal result from the ADC. Arguments: readout_frequency (float): Frequency to be used for signal processing. readout_channels (int[]): Channels to be used for signal processing. Returns: ampl (float): Amplitude of the processed signal. phase (float): Phase shift of the processed signal in degrees. it (float): I component of the processed signal. qt (float): Q component of the processed signal. """ self._thread.join() input_vec_I = self._processed_data[readout_channels[0]] input_vec_Q = self._processed_data[readout_channels[1]] it = 0 qt = 0 for i in range(self.samples_per_record): it += input_vec_I[i] * np.cos(2 * np.pi * readout_frequency * self.time_array[i]) qt += input_vec_Q[i] * np.cos(2 * np.pi * readout_frequency * self.time_array[i]) phase = np.arctan2(qt, it) * 180 / np.pi ampl = np.sqrt(it**2 + qt**2) return ampl, phase, it, qt def close(self): """Closes the instrument. """ self._alazar.close() super().close() src/qibolab/instruments/__init__.py METASEP from qibolab.instruments.qblox import PulsarQRM, PulsarQCM from qibolab.instruments.rohde_schwarz import SGS100A from qibolab.instruments.icarusq import TektronixAWG5204, AlazarADC, MCAttenuator, QuicSyn src/qibolab/instruments/ATS9371.py METASEP """ Adapted from the qcodes ATS9373 driver """ from distutils.version import LooseVersion import numpy as np from qcodes.utils import validators from qcodes.instrument_drivers.AlazarTech.ATS import AlazarTech_ATS from qcodes.instrument_drivers.AlazarTech.utils import TraceParameter class AlazarTech_ATS9371(AlazarTech_ATS): """ This class is the driver for the ATS9373 board. Note that this board is very similar to ATS9360. Refer to ATS SDK for details. Note that channels of this board have 12-bit resolution (see `IDN()['bits_per_sample']`) which means that the raw data that is returned by the card should be converted to uint16 type with a bit shift by 4 bits. Refer to ATS SDK for more infromation. 
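    Example:
        Converting a raw interleaved sample to volts, mirroring the
        ``signal_to_volt`` helper used by
        :class:`qibolab.instruments.icarusq.AlazarADC` (``raw_sample`` and
        ``channel_range`` are placeholders):

        .. code-block:: python

            code = raw_sample / 16  # drop the 4 padding bits to get the 12-bit code
            volts = channel_range * (code - 2047.5) / 2047.5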
""" samples_divisor = 128 _trigger_holdoff_min_fw_version = '30.04' def __init__(self, name, **kwargs): dll_path = 'C:\\WINDOWS\\System32\\ATSApi.dll' super().__init__(name, dll_path=dll_path, **kwargs) # add parameters # ----- Parameters for the configuration of the board ----- self.add_parameter(name='clock_source', parameter_class=TraceParameter, get_cmd=None, label='Clock Source', unit=None, initial_value='INTERNAL_CLOCK', val_mapping={'INTERNAL_CLOCK': 1, 'FAST_EXTERNAL_CLOCK': 2, 'EXTERNAL_CLOCK_10MHz_REF': 7}) self.add_parameter(name='external_sample_rate', get_cmd=None, parameter_class=TraceParameter, label='External Sample Rate', unit='S/s', vals=validators.MultiType(validators.Ints(300000000, 1000000000), validators.Enum('UNDEFINED')), initial_value='UNDEFINED') self.add_parameter(name='sample_rate', get_cmd=None, parameter_class=TraceParameter, label='Internal Sample Rate', unit='S/s', initial_value='UNDEFINED', val_mapping={1_000: 1, 2_000: 2, 5_000: 4, 10_000: 8, 20_000: 10, 50_000: 12, 100_000: 14, 200_000: 16, 500_000: 18, 1_000_000: 20, 2_000_000: 24, 5_000_000: 26, 10_000_000: 28, 20_000_000: 30, 25_000_000: 33, 50_000_000: 34, 100_000_000: 36, 125_000_000: 37, 160_000_000: 38, 180_000_000: 39, 200_000_000: 40, 250_000_000: 43, 500_000_000: 48, 800_000_000: 50, 1_000_000_000: 53, 'EXTERNAL_CLOCK': 64, 'UNDEFINED': 'UNDEFINED'}) self.add_parameter(name='clock_edge', get_cmd=None, parameter_class=TraceParameter, label='Clock Edge', unit=None, initial_value='CLOCK_EDGE_RISING', val_mapping={'CLOCK_EDGE_RISING': 0, 'CLOCK_EDGE_FALLING': 1}) self.add_parameter(name='decimation', get_cmd=None, parameter_class=TraceParameter, label='Decimation', unit=None, initial_value=1, vals=validators.Ints(0, 100000)) for i in ['1', '2']: self.add_parameter(name='coupling' + i, get_cmd=None, parameter_class=TraceParameter, label='Coupling channel ' + i, unit=None, initial_value='DC', val_mapping={'AC': 1, 'DC': 2}) self.add_parameter(name='channel_range' + i, get_cmd=None, parameter_class=TraceParameter, label='Range channel ' + i, unit='V', initial_value=0.4, val_mapping={0.02: 1, 0.04: 2, 0.05: 3, 0.08: 4, 0.1: 5, 0.2: 6, 0.4: 7, 0.5: 8, 0.8: 9 }) self.add_parameter(name='impedance' + i, get_cmd=None, parameter_class=TraceParameter, label='Impedance channel ' + i, unit='Ohm', initial_value=50, val_mapping={50: 2}) self.add_parameter(name='bwlimit' + i, get_cmd=None, parameter_class=TraceParameter, label='Bandwidth limit channel ' + i, unit=None, initial_value='DISABLED', val_mapping={'DISABLED': 0, 'ENABLED': 1}) self.add_parameter(name='trigger_operation', get_cmd=None, parameter_class=TraceParameter, label='Trigger Operation', unit=None, initial_value='TRIG_ENGINE_OP_J', val_mapping={'TRIG_ENGINE_OP_J': 0, 'TRIG_ENGINE_OP_K': 1, 'TRIG_ENGINE_OP_J_OR_K': 2, 'TRIG_ENGINE_OP_J_AND_K': 3, 'TRIG_ENGINE_OP_J_XOR_K': 4, 'TRIG_ENGINE_OP_J_AND_NOT_K': 5, 'TRIG_ENGINE_OP_NOT_J_AND_K': 6}) for i in ['1', '2']: self.add_parameter(name='trigger_engine' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Engine ' + i, unit=None, initial_value='TRIG_ENGINE_' + ('J' if i == '1' else 'K'), val_mapping={'TRIG_ENGINE_J': 0, 'TRIG_ENGINE_K': 1}) self.add_parameter(name='trigger_source' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Source ' + i, unit=None, initial_value='EXTERNAL', val_mapping={'CHANNEL_A': 0, 'CHANNEL_B': 1, 'EXTERNAL': 2, 'DISABLE': 3}) self.add_parameter(name='trigger_slope' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Slope ' + i, 
unit=None, initial_value='TRIG_SLOPE_POSITIVE', val_mapping={'TRIG_SLOPE_POSITIVE': 1, 'TRIG_SLOPE_NEGATIVE': 2}) self.add_parameter(name='trigger_level' + i, get_cmd=None, parameter_class=TraceParameter, label='Trigger Level ' + i, unit=None, initial_value=140, vals=validators.Ints(0, 255)) self.add_parameter(name='external_trigger_coupling', get_cmd=None, parameter_class=TraceParameter, label='External Trigger Coupling', unit=None, initial_value='DC', val_mapping={'AC': 1,'DC': 2}) self.add_parameter(name='external_trigger_range', get_cmd=None, parameter_class=TraceParameter, label='External Trigger Range', unit=None, initial_value='ETR_2V5', val_mapping={'ETR_TTL': 2, 'ETR_2V5': 3}) self.add_parameter(name='trigger_delay', get_cmd=None, parameter_class=TraceParameter, label='Trigger Delay', unit='Sample clock cycles', initial_value=0, vals=validators.Multiples(divisor=8, min_value=0)) # See Table 3 - Trigger Delay Alignment # TODO: this is either 8 or 16 dependent on the number of channels in use # NOTE: The board will wait for a for this amount of time for a # trigger event. If a trigger event does not arrive, then the # board will automatically trigger. Set the trigger timeout value # to 0 to force the board to wait forever for a trigger event. # # IMPORTANT: The trigger timeout value should be set to zero after # appropriate trigger parameters have been determined, otherwise # the board may trigger if the timeout interval expires before a # hardware trigger event arrives. self.add_parameter(name='timeout_ticks', get_cmd=None, parameter_class=TraceParameter, label='Timeout Ticks', unit='10 us', initial_value=0, vals=validators.Ints(min_value=0)) self.add_parameter(name='aux_io_mode', get_cmd=None, parameter_class=TraceParameter, label='AUX I/O Mode', unit=None, initial_value='AUX_IN_AUXILIARY', val_mapping={'AUX_OUT_TRIGGER': 0, 'AUX_IN_TRIGGER_ENABLE': 1, 'AUX_IN_AUXILIARY': 13}) self.add_parameter(name='aux_io_param', get_cmd=None, parameter_class=TraceParameter, label='AUX I/O Param', unit=None, initial_value='NONE', val_mapping={'NONE': 0, 'TRIG_SLOPE_POSITIVE': 1, 'TRIG_SLOPE_NEGATIVE': 2}) # ----- Parameters for the acquire function ----- self.add_parameter(name='mode', label='Acquisition mode', unit=None, initial_value='NPT', get_cmd=None, set_cmd=None, val_mapping={'NPT': 0x200, 'TS': 0x400}) self.add_parameter(name='samples_per_record', label='Samples per Record', unit=None, initial_value=1024, get_cmd=None, set_cmd=None, vals=validators.Multiples( divisor=self.samples_divisor, min_value=256)) self.add_parameter(name='records_per_buffer', label='Records per Buffer', unit=None, initial_value=10, get_cmd=None, set_cmd=None, vals=validators.Ints(min_value=0)) self.add_parameter(name='buffers_per_acquisition', label='Buffers per Acquisition', unit=None, get_cmd=None, set_cmd=None, initial_value=10, vals=validators.Ints(min_value=0)) self.add_parameter(name='channel_selection', label='Channel Selection', unit=None, get_cmd=None, set_cmd=None, initial_value='AB', val_mapping={'A': 1, 'B': 2, 'AB': 3}) self.add_parameter(name='transfer_offset', label='Transfer Offset', unit='Samples', get_cmd=None, set_cmd=None, initial_value=0, vals=validators.Ints(min_value=0)) self.add_parameter(name='external_startcapture', label='External Startcapture', unit=None, get_cmd=None, set_cmd=None, initial_value='ENABLED', val_mapping={'DISABLED': 0X0, 'ENABLED': 0x1}) self.add_parameter(name='enable_record_headers', label='Enable Record Headers', unit=None, get_cmd=None, set_cmd=None, 
initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x8}) self.add_parameter(name='alloc_buffers', label='Alloc Buffers', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x20}) self.add_parameter(name='fifo_only_streaming', label='Fifo Only Streaming', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x800}) self.add_parameter(name='interleave_samples', label='Interleave Samples', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x1000}) self.add_parameter(name='get_processed_data', label='Get Processed Data', unit=None, get_cmd=None, set_cmd=None, initial_value='DISABLED', val_mapping={'DISABLED': 0x0, 'ENABLED': 0x2000}) self.add_parameter(name='allocated_buffers', label='Allocated Buffers', unit=None, get_cmd=None, set_cmd=None, initial_value=4, vals=validators.Ints(min_value=0)) self.add_parameter(name='buffer_timeout', label='Buffer Timeout', unit='ms', get_cmd=None, set_cmd=None, initial_value=1000, vals=validators.Ints(min_value=0)) self.add_parameter(name='trigger_holdoff', label='Trigger Holdoff', docstring=f'If enabled Alazar will ' f'ignore any additional triggers ' f'while capturing a record. If disabled ' f'this will result in corrupt data. ' f'Support for this requires at least ' f'firmware version ' f'{self._trigger_holdoff_min_fw_version}', vals=validators.Bool(), get_cmd=self._get_trigger_holdoff, set_cmd=self._set_trigger_holdoff) model = self.get_idn()['model'] if model != 'ATS9371': raise Exception("The Alazar board kind is not 'ATS9371'," " found '" + str(model) + "' instead.") def _get_trigger_holdoff(self) -> bool: fwversion = self.get_idn()['firmware'] if LooseVersion(fwversion) < \ LooseVersion(self._trigger_holdoff_min_fw_version): return False # we want to check if the 26h bit (zero indexed) is high or not output = np.uint32(self._read_register(58)) # the two first two chars in the bit string is the sign and a 'b' # remove those to only get the bit pattern bitmask = bin(output)[2:] # all prefixed zeros are ignored in the bit conversion so the # bit mask may be shorter than what we expect. in that case # the bit we care about is zero so we return False if len(bitmask) < 27: return False return bool(bin(output)[-27]) def _set_trigger_holdoff(self, value: bool) -> None: fwversion = self.get_idn()['firmware'] if LooseVersion(fwversion) < \ LooseVersion(self._trigger_holdoff_min_fw_version): raise RuntimeError(f"Alazar 9360 requires at least firmware " f"version {self._trigger_holdoff_min_fw_version}" f" for trigger holdoff support. " f"You have version {fwversion}") current_value = self._read_register(58) if value is True: # to enable trigger hold off we want to flip the # 26th bit to 1. We do that by making a bitwise or # with a number that has a 1 on the 26th place and zero # otherwise. We use numpy.unit32 instead of python numbers # to have unsigned ints of the right size enable_mask = np.uint32(1 << 26) new_value = current_value | enable_mask else: # to disable trigger hold off we want to flip the # 26th bit to 0. 
We do that by making a bitwise and # with a number that has a 0 on the 26th place and 1 # otherwise disable_mask = ~np.uint32(1 << 26) # pylint: disable=E1130 new_value = current_value & disable_mask self._write_register(58, new_value) src/qibolab/states.py METASEP from qibo import K from qibo.abstractions.states import AbstractState from qibo.config import raise_error class HardwareState(AbstractState): def __init__(self, nqubits=None): if nqubits > 1: raise_error(NotImplementedError, "Hardware device has one qubit.") super().__init__(nqubits) self.readout = None self.normalized_voltage = None self.min_voltage = None self.max_voltage = None @property def shape(self): # pragma: no cover raise_error(NotImplementedError) @property def dtype(self): # pragma: no cover raise_error(NotImplementedError) def symbolic(self, decimals=5, cutoff=1e-10, max_terms=20): # pragma: no cover raise_error(NotImplementedError) def __array__(self): # pragma: no cover raise_error(NotImplementedError) def numpy(self): # pragma: no cover raise_error(NotImplementedError) def state(self, numpy=False, decimals=-1, cutoff=1e-10, max_terms=20): raise_error(NotImplementedError) @classmethod def from_readout(cls, readout, min_voltage, max_voltage): state = cls(1) state.readout = readout state.min_voltage = min_voltage state.max_voltage = max_voltage norm = max_voltage - min_voltage state.normalized_voltage = (readout[0] * 1e6 - min_voltage) / norm return state @classmethod def zero_state(cls, nqubits): # pragma: no cover raise_error(NotImplementedError) @classmethod def plus_state(cls, nqubits): # pragma: no cover raise_error(NotImplementedError) def copy(self, min_voltage=None, max_voltage=None): new = super().copy() new.readout = self.readout if min_voltage is not None: new.min_voltage = min_voltage else: new.min_voltage = self.min_voltage if max_voltage is not None: new.max_voltage = max_voltage else: new.max_voltage = self.max_voltage norm = new.max_voltage - new.min_voltage new.normalized_voltage = (new.readout[0] * 1e6 - new.min_voltage) / norm return new def to_density_matrix(self): # pragma: no cover raise_error(NotImplementedError) def probabilities(self, qubits=None, measurement_gate=None): p = self.normalized_voltage return K.cast([p, 1 - p], dtype="DTYPE") def measure(self, gate, nshots, registers=None): # pragma: no cover raise_error(NotImplementedError) def set_measurements(self, qubits, samples, registers=None): # pragma: no cover raise_error(NotImplementedError) def samples(self, binary=True, registers=False): # pragma: no cover raise_error(NotImplementedError) def frequencies(self, binary=True, registers=False): # pragma: no cover raise_error(NotImplementedError) def apply_bitflips(self, p0, p1=None): # pragma: no cover raise_error(NotImplementedError, "Noise simulation is not required for hardware.") def expectation(self, hamiltonian, normalize=False): # pragma: no cover # FIXME: This is the expectation value of <Z> only! return 2 * self.probabilities()[0] - 1 src/qibolab/pulses.py METASEP """Pulse abstractions.""" import bisect import numpy as np class Pulse: """Describes a single pulse to be added to waveform array. Args: start (float): Start time of pulse in ns. duration (float): Pulse duration in ns. amplitude (float): Pulse digital amplitude (unitless) [0 to 1]. frequency (float): Pulse Intermediate Frequency in Hz [10e6 to 300e6]. phase (float): To be added. shape: (PulseShape): Pulse shape. See :py:mod:`qibolab.pulses_shapes` for list of available shapes. 
offset_i (float): Optional pulse I offset (unitless). (amplitude + offset) should be between [0 and 1]. offset_q (float): Optional pulse Q offset (unitless). (amplitude + offset) should be between [0 and 1]. channel (int/str): Specifies the device that will execute this pulse. FPGA channel (int) for IcarusQ or qrm/qcm (str) for TIIq. qubit (int): Target qubit ID Example: .. code-block:: python from qibolab.pulses import Pulse from qibolab.pulse_shapes import Gaussian # define pulse with Gaussian shape pulse = Pulse(start=0, frequency=200000000.0, amplitude=0.3, duration=60, phase=0, shape=Gaussian(60 / 5)) """ def __init__(self, start, duration, amplitude, frequency, phase, shape, offset_i=0, offset_q=0, channel="qcm", qubit=0): # FIXME: Since the ``start`` value depends on the previous pulses we are # not sure if it should be a local property of the ``Pulse`` object self.start = start self.duration = duration self.amplitude = amplitude self.frequency = frequency self.phase = phase self.shape = shape # PulseShape objects self.channel = channel self.offset_i = offset_i self.offset_q = offset_q self.qubit = qubit def serial(self): return "P({}, {}, {}, {}, {}, {}, {})".format(self.channel, self.start, self.duration, self.amplitude, self.frequency, self.phase, self.shape) ### IcarusQ specific method ### #def compile(self, waveform, sequence): # i_start = bisect.bisect(sequence.time, self.start) # #i_start = int((self.start / sequence.duration) * sequence.sample_size) # i_duration = int((self.duration / sequence.duration) * sequence.sample_size) # time = sequence.time[i_start:i_start + i_duration] # envelope = self.shape.envelope(time, self.start, self.duration, self.amplitude) # waveform[self.channel, i_start:i_start + i_duration] += ( # envelope * np.sin(2 * np.pi * self.frequency * time + self.phase)) # return waveform def compile(self): return self.shape.envelope(None, None, self.duration, self.amplitude) def __repr__(self): return self.serial() class ReadoutPulse(Pulse): """Describes a readout pulse. See :class:`qibolab.pulses.Pulse` for argument desciption. """ def __init__(self, start, duration, amplitude, frequency, phase, shape, offset_i=0, offset_q=0, channel="qrm", qubit=0): super().__init__(start, duration, amplitude, frequency, phase, shape, offset_i, offset_q, channel, qubit) class IQReadoutPulse(Pulse): # TODO: Remove this or think how to merge with ``ReadoutPulse``. # Currently keeping it for compatibility with IcarusQ as it breaks the import """ Describes a pair of IQ pulses for the readout Args: channels (int): Pair of FPGA channels to play pulses on. start (float): Start time of pulse in seconds. duration (float): Pulse duration in seconds. amplitude (float): Pulse amplitude in volts. frequency (float): Pulse frequency in Hz. phases (float): Pulse phase offset for mixer sideband. 
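    Example:
        A sketch with placeholder values; channels 2 and 3 are assumed to carry
        the I and Q components, and timings follow the seconds convention of
        this class (unlike :class:`qibolab.pulses.Pulse`, which uses ns).

        .. code-block:: python

            from qibolab.pulses import IQReadoutPulse

            ro = IQReadoutPulse(channels=(2, 3), start=50e-9, duration=3e-6,
                                amplitude=0.5, frequency=100e6, phases=(0.0, 0.0))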
""" def __init__(self, channels, start, duration, amplitude, frequency, phases): self.channels = channels self.start = start self.duration = duration self.amplitude = amplitude self.frequency = frequency self.phases = phases def serial(self): return "" def compile(self, waveform, sequence): i_start = bisect.bisect(sequence.time, self.start) #i_start = int((self.start / sequence.duration) * sequence.sample_size) i_duration = int((self.duration / sequence.duration) * sequence.sample_size) time = sequence.time[i_start:i_start + i_duration] waveform[self.channels[0], i_start:i_start + i_duration] += self.amplitude * np.cos(2 * np.pi * self.frequency * time + self.phases[0]) waveform[self.channels[1], i_start:i_start + i_duration] -= self.amplitude * np.sin(2 * np.pi * self.frequency * time + self.phases[1]) return waveform class MultifrequencyPulse(Pulse): """Describes multiple pulses to be added to waveform array. Used when multiple pulses are overlapping to avoid overwrite. """ def __init__(self, members): self.members = members def serial(self): return "M({})".format(", ".join([m.serial() for m in self.members])) def compile(self, waveform, sequence): for member in self.members: waveform += member.compile(waveform, sequence) return waveform class FilePulse(Pulse): """Commands the FPGA to load a file as a waveform array in the specified channel.""" def __init__(self, channel, start, filename): self.channel = channel self.start = start self.filename = filename def serial(self): return "F({}, {}, {})".format(self.channel, self.start, self.filename) def compile(self, waveform, sequence): # `FilePulse` cannot be tested in CI because a file is not available i_start = int((self.start / sequence.duration) * sequence.sample_size) arr = np.genfromtxt(sequence.file_dir, delimiter=',')[:-1] waveform[self.channel, i_start:i_start + len(arr)] = arr return waveform src/qibolab/pulse_shapes.py METASEP import numpy as np from abc import ABC, abstractmethod from qibo.config import raise_error class PulseShape(ABC): """Describes the pulse shape to be used.""" def __init__(self): # pragma: no cover self.name = "" @abstractmethod def envelope(self, time, start, duration, amplitude): # pragma: no cover raise_error(NotImplementedError) def __repr__(self): return "({})".format(self.name) class Rectangular(PulseShape): """Rectangular/square pulse shape.""" def __init__(self): self.name = "rectangular" def envelope(self, time, start, duration, amplitude): """Constant amplitude envelope.""" #return amplitude # FIXME: This may have broken IcarusQ return amplitude * np.ones(int(duration)) class Gaussian(PulseShape): """Gaussian pulse shape""" def __init__(self, sigma): self.name = "gaussian" self.sigma = sigma def envelope(self, time, start, duration, amplitude): """Gaussian envelope centered with respect to the pulse. .. math:: A\exp^{-\\frac{1}{2}\\frac{(t-\mu)^2}{\sigma^2}} """ from scipy.signal import gaussian return amplitude * gaussian(int(duration), std=self.sigma) # FIXME: This may have broken IcarusQ #mu = start + duration / 2 #return amplitude * np.exp(-0.5 * (time - mu) ** 2 / self.sigma ** 2) def __repr__(self): return "({}, {})".format(self.name, self.sigma) class Drag(PulseShape): """Derivative Removal by Adiabatic Gate (DRAG) pulse shape.""" def __init__(self, sigma, beta): self.name = "drag" self.sigma = sigma self.beta = beta def envelope(self, time, start, duration, amplitude): """DRAG envelope centered with respect to the pulse. .. math:: G + i\\beta(-\\frac{t-\mu}{\sigma^2})G where .. 
math:: G = A\exp^{-\\frac{1}{2}\\frac{(t-\mu)^2}{\sigma^2}} """ mu = start + duration / 2 gaussian = amplitude * np.exp(-0.5 * (time - mu) ** 2 / self.sigma ** 2) return gaussian + 1j * self.beta * (-(time - mu) / self.sigma ** 2) * gaussian def __repr__(self): return "({}, {}, {})".format(self.name, self.sigma, self.beta) class SWIPHT(PulseShape): """Speeding up Wave forms by Inducing Phase to Harmful Transitions pulse shape.""" def __init__(self, g): self.name = "SWIPHT" self.g = g def envelope(self, time, start, duration, amplitude): ki_qq = self.g * np.pi t_g = 5.87 / (2 * abs(ki_qq)) t = np.linspace(0, t_g, len(time)) gamma = 138.9 * (t / t_g)**4 *(1 - t / t_g)**4 + np.pi / 4 gamma_1st = 4 * 138.9 * (t / t_g)**3 * (1 - t / t_g)**3 * (1 / t_g - 2 * t / t_g**2) gamma_2nd = 4*138.9*(t / t_g)**2 * (1 - t / t_g)**2 * (14*(t / t_g**2)**2 - 14*(t / t_g**3) + 3 / t_g**2) omega = gamma_2nd / np.sqrt(ki_qq**2 - gamma_1st**2) - 2*np.sqrt(ki_qq**2 - gamma_1st**2) * 1 / np.tan(2 * gamma) omega = omega / max(omega) return omega * amplitude def __repr__(self): return "({}, {})".format(self.name, self.g) src/qibolab/platform.py METASEP import pathlib from qibo.config import raise_error def Platform(name, runcard=None): """Platform for controlling quantum devices. Args: name (str): name of the platform. Options are 'tiiq', 'qili' and 'icarusq'. runcard (str): path to the yaml file containing the platform setup. Returns: The plaform class. """ if not runcard: runcard = pathlib.Path(__file__).parent / "runcards" / f"{name}.yml" if name == 'tiiq' or name == 'qili': from qibolab.platforms.qbloxplatform import QBloxPlatform as Device elif name == 'icarusq': from qibolab.platforms.icplatform import ICPlatform as Device else: raise_error(RuntimeError, f"Platform {name} is not supported.") return Device(name, runcard) src/qibolab/gates.py METASEP import sys import math import copy from abc import ABC, abstractmethod from qibo import gates from qibo.config import raise_error class AbstractHardwareGate(ABC): module = sys.modules[__name__] @abstractmethod def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) @abstractmethod def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) @abstractmethod def to_sequence(self, sequence): # pragma: no cover """Adds the pulses implementing the gate to the given ``PulseSequence``.""" raise_error(NotImplementedError) class H(AbstractHardwareGate, gates.H): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): q = self.target_qubits[0] composite = [RY(q, math.pi / 2), RX(q, math.pi)] pulses = [] for gate in composite: pulses.extend(gate.pulse_sequence(qubit_config, qubit_times, qubit_phases)) return pulses def duration(self, qubit_config): d = 0 q = self.target_qubits[0] composite = [RY(q, math.pi / 2), RX(q, math.pi)] for gate in composite: d += gate.duration(qubit_config) return d def to_sequence(self, sequence): q = self.target_qubits[0] sequence.add_u3(7 * math.pi / 2, math.pi, 0, q) class I(AbstractHardwareGate, gates.I): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): return [] def duration(self, qubit_config): return 0 def to_sequence(self, sequence): pass class Align(AbstractHardwareGate, gates.I): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): m = max(qubit_times[q] for q in self.target_qubits) for q in self.target_qubits: qubit_times[q] = m return [] def duration(self, qubit_config): return 0 def to_sequence(self, 
sequence): raise_error(NotImplementedError) class M(AbstractHardwareGate, gates.M): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): pulses = [] for q in self.target_qubits: pulses += copy.deepcopy(qubit_config[q].gates.get(self)) return pulses def duration(self, qubit_config): pulses = [] for q in self.target_qubits: pulses += copy.deepcopy(qubit_config[q].gates.get(self)) m = 0 for p in pulses: m = max(p.duration, m) return m def to_sequence(self, sequence): for q in self.target_qubits: sequence.add_measurement(q) class RX(AbstractHardwareGate, gates.RX): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): if self.parameters == 0: return [] q = self.target_qubits[0] time_mod = abs(self.parameters / math.pi) phase_mod = 0 if self.parameters > 0 else -180 phase_mod += qubit_phases[q] m = 0 pulses = copy.deepcopy(qubit_config[q].gates.get(self)) for p in pulses: duration = p.duration * time_mod p.start = qubit_times[q] p.phase += phase_mod p.duration = duration m = max(duration, m) qubit_times[q] += m return pulses def duration(self, qubit_config): q = self.target_qubits[0] time_mod = abs(self.parameters / math.pi) pulses = copy.deepcopy(qubit_config[q].gates.get(self)) m = 0 for p in pulses: m = max(p.duration * time_mod, m) return m def to_sequence(self, sequence): q = self.target_qubits[0] theta = self.parameters phi = - math.pi / 2 lam = math.pi / 2 sequence.add_u3(theta, phi, lam, q) class RY(AbstractHardwareGate, gates.RY): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): return RX.pulse_sequence(self, qubit_config, qubit_times, qubit_phases) def duration(self, qubit_config): return RX.duration(self, qubit_config) def to_sequence(self, sequence): q = self.target_qubits[0] theta = self.parameters phi = 0 lam = 0 sequence.add_u3(theta, phi, lam, q) class RZ(AbstractHardwareGate, gates.RZ): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): # apply virtually by changing ``phase`` instead of using pulses sequence.phase += self.parameters #theta = 0 #phi = self.parameters / 2 #lam = self.parameters / 2 #return sequence.add_u3(theta, phi, lam) class CNOT(AbstractHardwareGate, gates.CNOT): # CNOT gate is not tested because `qubit_config` placeholder is single qubit def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): q = self.target_qubits[0] control = self.control_qubits[0] start = max(qubit_times[q], qubit_times[control]) pulses = copy.deepcopy(qubit_config[q].gates.get(self)) for p in pulses: duration = p.duration p.start = start p.phase = qubit_phases[q] p.duration = duration qubit_times[q] = start + duration qubit_times[control] = qubit_times[q] return pulses def duration(self, qubit_config): q = self.target_qubits[0] control = self.control_qubits[0] m = 0 pulses = qubit_config[q]["gates"][self.name + "_{}".format(control)] for p in pulses: m = max(p.duration, m) return m def to_sequence(self, sequence): raise_error(NotImplementedError) class U2(AbstractHardwareGate, gates.U2): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): args = (math.pi / 2,) + self.parameters sequence.add_u3(*args) class U3(AbstractHardwareGate, gates.U3): def pulse_sequence(self, 
qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(*self.parameters) class X(AbstractHardwareGate, gates.X): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(math.pi, 0, math.pi) class Y(AbstractHardwareGate, gates.Y): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(math.pi, 0, 0) class Z(AbstractHardwareGate, gates.Z): def pulse_sequence(self, qubit_config, qubit_times, qubit_phases): # pragma: no cover raise_error(NotImplementedError) def duration(self, qubit_config): # pragma: no cover raise_error(NotImplementedError) def to_sequence(self, sequence): sequence.add_u3(0, math.pi, 0) src/qibolab/circuit.py METASEP from qibo import K from qibolab import states, pulses from qibo.config import raise_error from qibo.core import circuit import numpy as np class PulseSequence: """List of pulses. Holds a separate list for each instrument. """ def __init__(self): super().__init__() self.qcm_pulses = [] self.qrm_pulses = [] self.time = 0 self.phase = 0 self.pulses = [] def add(self, pulse): """Add a pulse to the sequence. Args: pulse (:class:`qibolab.pulses.Pulse`): Pulse object to add. Example: .. code-block:: python from qibolab.pulses import Pulse, ReadoutPulse from qibolab.circuit import PulseSequence from qibolab.pulse_shapes import Rectangular, Gaussian # define two arbitrary pulses pulse1 = Pulse(start=0, frequency=200000000.0, amplitude=0.3, duration=60, phase=0, shape=Gaussian(60 / 5))) pulse2 = ReadoutPulse(start=70, frequency=20000000.0, amplitude=0.5, duration=3000, phase=0, shape=Rectangular())) # define the pulse sequence sequence = PulseSequence() # add pulses to the pulse sequence sequence.add(pulse1) sequence.add(pulse2) """ if pulse.channel == "qrm" or pulse.channel == 1: self.qrm_pulses.append(pulse) else: self.qcm_pulses.append(pulse) self.pulses.append(pulse) def add_u3(self, theta, phi, lam, qubit=0): """Add pulses that implement a U3 gate. Args: theta, phi, lam (float): Parameters of the U3 gate. 
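        Example:
            A sketch assuming a platform is already attached to the active
            qibolab backend, since the pulse parameters are fetched from its
            calibration settings.

            .. code-block:: python

                import numpy as np
                from qibolab.circuit import PulseSequence

                sequence = PulseSequence()
                # RX(pi/2) decomposed as U3(pi/2, -pi/2, pi/2), as in gates.RX.to_sequence
                sequence.add_u3(np.pi / 2, -np.pi / 2, np.pi / 2)
                sequence.add_measurement()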
""" from qibolab.pulse_shapes import Gaussian # Pi/2 pulse from calibration if hasattr(K.platform, "qubits"): kwargs = K.platform.fetch_qubit_pi_pulse(qubit) else: kwargs = { "amplitude": K.platform.pi_pulse_amplitude, "duration": K.platform.pi_pulse_duration, "frequency": K.platform.pi_pulse_frequency } kwargs["duration"] = kwargs["duration"] // 2 delay = K.platform.delay_between_pulses duration = kwargs.get("duration") kwargs["shape"] = Gaussian(duration / 5) self.phase += phi - np.pi / 2 kwargs["start"] = self.time kwargs["phase"] = self.phase self.add(pulses.Pulse(**kwargs)) self.time += duration + delay self.phase += np.pi - theta kwargs["start"] = self.time kwargs["phase"] = self.phase self.add(pulses.Pulse(**kwargs)) self.time += duration + delay self.phase += lam - np.pi / 2 def add_measurement(self, qubit=0): """Add measurement pulse.""" from qibolab.pulse_shapes import Rectangular if hasattr(K.platform, "qubits"): kwargs = K.platform.fetch_qubit_readout_pulse(qubit) else: kwargs = K.platform.readout_pulse kwargs["start"] = self.time + K.platform.delay_before_readout kwargs["phase"] = self.phase kwargs["shape"] = Rectangular() self.add(pulses.ReadoutPulse(**kwargs)) class HardwareCircuit(circuit.Circuit): def __init__(self, nqubits): if nqubits > 1: raise ValueError("Device has only one qubit.") super().__init__(nqubits) def execute(self, initial_state=None, nshots=None): if initial_state is not None: raise_error(ValueError, "Hardware backend does not support " "initial state in circuits.") if self.measurement_gate is None: raise_error(RuntimeError, "No measurement register assigned.") # Translate gates to pulses and create a ``PulseSequence`` sequence = PulseSequence() for gate in self.queue: gate.to_sequence(sequence) self.measurement_gate.to_sequence(sequence) # Execute the pulse sequence on the platform K.platform.connect() K.platform.setup() K.platform.start() readout = K.platform(sequence, nshots) K.platform.stop() if hasattr(K.platform, "qubits"): q = self.measurement_gate.target_qubits[0] qubit = K.platform.fetch_qubit(q) min_v = qubit.min_readout_voltage max_v = qubit.max_readout_voltage else: min_v = K.platform.min_readout_voltage max_v = K.platform.max_readout_voltage return states.HardwareState.from_readout(readout, min_v, max_v) src/qibolab/backend.py METASEP import os from qibolab.platform import Platform from qibo.backends.numpy import NumpyBackend from qibo.config import raise_error class QibolabBackend(NumpyBackend): # pragma: no cover description = "" # TODO: Write proper description def __init__(self): super().__init__() self.name = "qibolab" self.custom_gates = True self.is_hardware = True self.platform = self.set_platform(os.environ.get("QIBOLAB_PLATFORM", "tiiq")) def set_platform(self, platform): self.platform = Platform(platform) def get_platform(self): return self.platform.name def circuit_class(self, accelerators=None, density_matrix=False): if accelerators is not None: raise_error(NotImplementedError, "Hardware backend does not support " "multi-GPU configuration.") if density_matrix: raise_error(NotImplementedError, "Hardware backend does not support " "density matrix simulation.") from qibolab.circuit import HardwareCircuit return HardwareCircuit def create_gate(self, cls, *args, **kwargs): from qibolab import gates return getattr(gates, cls.__name__)(*args, **kwargs) def create_einsum_cache(self, qubits, nqubits, ncontrol=None): # pragma: no cover raise_error(NotImplementedError, "`create_einsum_cache` method is " "not required for hardware backends.") 
def einsum_call(self, cache, state, matrix): # pragma: no cover raise_error(NotImplementedError, "`einsum_call` method is not required " "for hardware backends.") src/qibolab/__init__.py METASEP __version__ = "0.0.1.dev1" from qibolab.platform import Platform src/qibolab/calibration/calibration.py METASEP
[ { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def 
run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = 
software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = 
ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + 
platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n 
mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - 
qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + 
platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = 
ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = 
savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = 
ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n 
#ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator 
Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = 
self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = 
platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = 
self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states", "type": "infile" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, 
run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + 
platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n 
self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, 
sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = 
np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n 
qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n 
platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n 
mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n 
platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n 
ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = 
dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n 
pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # 
Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = 
ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = 
ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n 
print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with 
open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n 
mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest 
platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = 
ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # 
resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = 
ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n 
platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n 
platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n 
dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def 
run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = 
self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, 
rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states\n all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()\n # #TODO: save in runcard mean_gnd_states and mean_exc_states\n print(all_gnd_states)\n print(mean_gnd_states)\n print(all_exc_states)\n print(mean_exc_states)\n\n # #TODO: Remove plot qubit states results when tested\n utils.plot_qubit_states(all_gnd_states, all_exc_states)\n\n #TODO: Remove 0 and 1 classification from auto calibration when tested\n #Classify all points into 0 and 1\n classified_gnd_results = []\n for point in all_gnd_states: \n classified_gnd_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))\n\n classified_exc_results = []\n for point in all_exc_states:", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = 
ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n 
print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def 
run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", 
soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics 
settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n 
scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit 
freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = 
utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained 
from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest 
platform runcard", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, 
smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse 
Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = 
fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = 
dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n 
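The embedded rabi_pulse_length routine sweeps the drive-pulse duration and passes the resulting dataset to fitting.rabi_fit, whose model is not included here. A minimal sketch of one common analysis, a damped cosine whose half period gives the pi-pulse duration; the model, the initial guesses, and the 1e6 voltage scaling are assumptions rather than the project's verified fit:

import numpy as np
from scipy.optimize import curve_fit

def rabi_model(t, offset, amplitude, period, phase, decay):
    # Damped cosine commonly used to describe Rabi oscillations of the readout voltage
    return offset + amplitude * np.cos(2 * np.pi * t / period + phase) * np.exp(-t / decay)

def fit_rabi(durations, voltages):
    durations = np.asarray(durations, dtype=float)
    voltages = np.asarray(voltages, dtype=float)
    period_guess = 2 * durations[voltages.argmin()]   # minimum voltage ~ pi pulse ~ half period
    if period_guess <= 0:
        period_guess = durations.max()
    p0 = [voltages.mean(), np.ptp(voltages) / 2, period_guess, 0.0, durations.max()]
    popt, _ = curve_fit(rabi_model, durations, voltages, p0=p0, maxfev=10000)
    offset, amplitude, period, phase, decay = popt
    pi_pulse_duration = period / 2            # half a Rabi period flips the qubit
    min_voltage = (offset - abs(amplitude)) * 1e6
    return pi_pulse_duration, min_voltage, decay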
pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n 
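In the embedded callibrate_qubit_states routine, exc_sequence is evidently meant to hold the pi pulse followed by the readout pulse while gnd_sequence holds only the readout pulse, and the single-shot IQ points are collected in plain Python lists, so append (not add) is the list call that matches the intent. A hedged sketch of that shot-collection loop, assuming platform.execute returns i and q at indices 2 and 3 exactly as the snippet reads them:

import numpy as np

def collect_state_shots(platform, gnd_sequence, exc_sequence, niter=10, nshots=1):
    all_gnd, all_exc = [], []
    platform.LO_qcm.off()                     # drive off: ground-state shots
    for _ in range(niter):
        result = platform.execute(gnd_sequence, nshots)
        all_gnd.append(complex(result[2], result[3]))   # one IQ point per shot
    platform.LO_qcm.on()                      # drive on: excited-state shots
    for _ in range(niter):
        result = platform.execute(exc_sequence, nshots)
        all_exc.append(complex(result[2], result[3]))
    return all_gnd, np.mean(all_gnd), all_exc, np.mean(all_exc)

Each collected point can then be assigned to state 0 or 1 by comparing its distance to the two mean points, for example 0 if abs(p - mean_gnd) <= abs(p - mean_exc) else 1, which appears to be the role of utils.classify in the later classification loops.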
platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", 
\"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states\n all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()\n # #TODO: save in runcard mean_gnd_states and mean_exc_states\n print(all_gnd_states)\n print(mean_gnd_states)\n print(all_exc_states)\n print(mean_exc_states)\n\n # #TODO: Remove plot qubit states results when tested\n utils.plot_qubit_states(all_gnd_states, all_exc_states)\n\n #TODO: Remove 0 and 1 classification from auto calibration when tested\n #Classify all points into 0 and 1\n classified_gnd_results = []\n for point in all_gnd_states: ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n 
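# --- Editor's note (hedged sketch, not part of the original snippet) -------------------
# utils.variable_resolution_scanrange() is called in the fast sweep above but is not shown
# here. A sketch of what such a helper typically returns (coarse steps on the wings, fine
# steps around zero offset); the real utils implementation may differ:
import numpy as np

def variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step):
    """Symmetric frequency-offset scan: coarse outside +-highres_width, fine inside it."""
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),   # coarse left wing
        np.arange(-highres_width, highres_width, highres_step),  # fine centre
        np.arange(highres_width, lowres_width, lowres_step),     # coarse right wing
    ))

# Example: +-30 MHz in 1 MHz steps, refined to 0.1 MHz within +-5 MHz of the LO frequency.
offsets = variable_resolution_scanrange(30e6, 1e6, 5e6, 0.1e6)
# ---------------------------------------------------------------------------------------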
mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = 
self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n 
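# --- Editor's note (hedged sketch, not part of the original snippet) -------------------
# ROController is wrapped in Gettable() throughout this file but its definition is not part
# of this snippet. A sketch of a quantify-core compatible gettable, assuming
# platform.execute(sequence, nshots) returns an indexable result whose first entry is the
# averaged readout amplitude (the real controller may expose more variables):
class ROController():
    # quantify's Gettable wrapper only needs name/label/unit attributes and a get() method.
    def __init__(self, platform, sequence):
        self.name = "ro_amplitude"
        self.label = "Readout amplitude"
        self.unit = "V"
        self.platform = platform
        self.sequence = sequence

    def get(self):
        # Execute the stored sequence once and return the measured amplitude.
        results = self.platform.execute(self.sequence, 1)
        return results[0]
# ---------------------------------------------------------------------------------------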
platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import 
Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n 
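# --- Editor's note (hedged sketch, not part of the original snippet) -------------------
# The popitem()/eval() pattern above mutates platform.settings in place and relies on the
# shape entry being the last key of the runcard section, so building the same pulse twice
# from one settings dict would fail. A non-destructive alternative, assuming the shape is
# stored under a key literally named 'shape':
from qibolab.pulses import ReadoutPulse
from qibolab.pulse_shapes import Rectangular, Gaussian  # so eval() can resolve shape names

def readout_pulse_from_settings(ps):
    settings = dict(ps['readout_pulse'])      # work on a copy, leave the runcard untouched
    shape = eval(settings.pop('shape'))       # e.g. "Rectangular()"
    return ReadoutPulse(**settings, shape=shape)
# ---------------------------------------------------------------------------------------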
sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = 
{pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = 
utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: 
Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = 
ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = 
utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = 
ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest 
platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states\n all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()\n # #TODO: save in runcard mean_gnd_states and mean_exc_states\n print(all_gnd_states)\n print(mean_gnd_states)\n print(all_exc_states)\n print(mean_exc_states)\n\n # #TODO: Remove plot qubit states results when tested", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom 
qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n 
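# --- Editor's note (hedged sketch, not part of the original snippet) -------------------
# QCPulseLengthParameter and T1WaitParameter are wrapped in Settable() further down but are
# not defined in this snippet. A sketch of quantify-core compatible settables; the attribute
# names assume the pulses expose mutable 'duration' and 'start' fields:
class QCPulseLengthParameter():
    # Sweeps the drive-pulse duration and starts the readout right after it.
    def __init__(self, ro_pulse, qc_pulse):
        self.name = "duration"
        self.label = "Drive pulse duration"
        self.unit = "ns"
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.duration = value
        self.ro_pulse.start = value

class T1WaitParameter():
    # Sweeps the delay between the end of the pi pulse and the readout pulse.
    def __init__(self, ro_pulse, qc_pulse):
        self.name = "t1_wait"
        self.label = "Delay before readout"
        self.unit = "ns"
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.ro_pulse.start = self.qc_pulse.duration + value
# ---------------------------------------------------------------------------------------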
sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom 
qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n 
sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: 
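A minimal usage sketch for the Calibration class above, not part of the original file: it assumes an already-constructed qibolab Platform, and that the helpers the routines reference (ROController, QCPulseLengthParameter, T1WaitParameter) are defined elsewhere in the same module. The wrapper name run_full_calibration and the call order are illustrative assumptions; the unpacked return values follow the return statements shown above.

def run_full_calibration(platform: Platform):
    # Hypothetical wrapper: drives the calibration routines above in a typical bring-up order.
    calibration = Calibration(platform)
    resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = calibration.run_resonator_spectroscopy()
    qubit_freq, min_ro_voltage, smooth_dataset, dataset = calibration.run_qubit_spectroscopy()
    dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_min_voltage, t1_estimate = calibration.run_rabi_pulse_length()
    t1, smooth_dataset, dataset = calibration.run_t1()
    return resonator_freq, qubit_freq, pi_pulse_duration, t1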
Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = 
eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, 
t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n ", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / 
\"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = 
ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / 
\"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = 
ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = 
dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n 
sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = 
mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, 
sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n 
sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages 
= ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = 
ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n 
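# --- Illustrative sketch (not the project's actual ROController) -------------
# The sweeps in this module wrap `ROController(platform, sequence)` in a
# quantify-core Gettable; the real class is defined elsewhere in the project
# and is not part of this excerpt. A minimal gettable only needs `name`,
# `label` and `unit` attributes plus a `get()` method. The four channels and
# the bare `platform.execute(sequence)` call below are assumptions based on
# how the results are used in this module.
class ROControllerSketch():
    name = ['msr', 'phase', 'i', 'q']
    label = ['Magnitude', 'Phase', 'I', 'Q']
    unit = ['V', 'rad', 'V', 'V']

    def __init__(self, platform, sequence):
        self.platform = platform
        self.sequence = sequence

    def get(self):
        # Execute the pulse sequence and return the acquisition result.
        return self.platform.execute(self.sequence)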
mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n 
highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n 
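# --- Illustrative sketch (assumption, not the actual utils implementation) ---
# The resonator fast sweep builds its frequency offsets with
# utils.variable_resolution_scanrange(lowres_width, lowres_step,
# highres_width, highres_step). A plausible minimal version: coarse steps on
# the wings of the scan window and fine steps in the central region around 0.
import numpy as np

def variable_resolution_scanrange_sketch(lowres_width, lowres_step,
                                         highres_width, highres_step):
    # Coarse left wing, fine centre, coarse right wing (offsets in Hz).
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),
        np.arange(-highres_width, highres_width, highres_step),
        np.arange(highres_width, lowres_width, lowres_step),
    ))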
min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n 
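# --- Shape of the settings that load_settings() is expected to return --------
# calibration.yml itself is not part of this excerpt; the keys below are the
# ones read by the routines in this module, while every numeric value is a
# placeholder chosen only for illustration.
EXAMPLE_CALIBRATION_SETTINGS = {
    "max_num_plots": 2,           # placeholder
    "software_averages": 1024,    # placeholder
    "resonator_spectroscopy": {
        "lowres_width": 30_000_000, "lowres_step": 1_000_000,
        "highres_width": 1_000_000, "highres_step": 100_000,
        "precision_width": 500_000, "precision_step": 50_000,
    },
    "qubit_spectroscopy": {
        "fast_start": -30_000_000, "fast_end": 30_000_000, "fast_step": 1_000_000,
        "precision_start": -1_000_000, "precision_end": 1_000_000, "precision_step": 50_000,
    },
    "rabi_pulse_length": {
        "pulse_duration_start": 1, "pulse_duration_end": 200, "pulse_duration_step": 1,
    },
    "t1": {
        "delay_before_readout_start": 4,
        "delay_before_readout_end": 20_000,
        "delay_before_readout_step": 100,
    },
}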
self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", 
soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest 
platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states\n all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()\n # #TODO: save in runcard mean_gnd_states and mean_exc_states\n print(all_gnd_states)\n print(mean_gnd_states)\n print(all_exc_states)\n print(mean_exc_states)\n\n # #TODO: Remove plot qubit states results when tested\n utils.plot_qubit_states(all_gnd_states, all_exc_states)\n\n #TODO: Remove 0 and 1 classification from auto calibration when tested\n #Classify all points into 0 and 1\n classified_gnd_results = []\n for point in all_gnd_states: \n classified_gnd_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))\n\n classified_exc_results = []\n for point in all_exc_states:\n classified_exc_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))\n\n print(classified_gnd_results)\n 
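# --- Illustrative sketch of the utils.classify helper used above -------------
# The actual implementation is not included in this excerpt; a common choice
# is a nearest-mean discriminator on the IQ plane, labelling a shot as 1
# (excited) when it lies closer to the mean excited-state point than to the
# mean ground-state point.
def classify_sketch(point: complex, mean_gnd: complex, mean_exc: complex) -> int:
    return int(abs(point - mean_exc) < abs(point - mean_gnd))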
print(classified_exc_results)\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, 
lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = 
fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = 
ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = 
dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = 
np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, 
\"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n 
mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()", "type": "inproject" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n 
qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))", "type": "common" }, { "content": "import 
pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()", "type": "common" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n 
mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, 
sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):", "type": "common" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom 
quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n 
ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() ", "type": "common" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, 
precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape 
= eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, 
duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_exc_states.add(point)\n\n platform.stop()\n\n return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)\n\n def auto_calibrate_plaform(self):\n platform = self.platform\n\n #backup latest platform runcard\n utils.backup_config_file(platform)\n\n #run and save cavity spectroscopy calibration\n resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()\n\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_freq\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\"))\n print(utils.get_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\"))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_freq\", float(resonator_freq))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_avg_min_ro_voltage\", float(avg_min_voltage))\n # utils.save_config_parameter(\"settings\", \"\", \"resonator_spectroscopy_max_ro_voltage\", float(max_ro_voltage))\n # utils.save_config_parameter(\"LO_QRM_settings\", \"\", \"frequency\", float(resonator_freq - 20_000_000))\n\n #run and save qubit spectroscopy calibration\n qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_freq\"))\n print(utils.get_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_freq\", float(qubit_freq))\n # utils.save_config_parameter(\"LO_QCM_settings\", \"\", \"frequency\", float(qubit_freq + 200_000_000))\n # utils.save_config_parameter(\"settings\", \"\", \"qubit_spectroscopy_min_ro_voltage\", float(min_ro_voltage))\n\n # #run Rabi and save Pi pulse params from calibration\n dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_duration\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))\n print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_gain\"))\n print(utils.get_config_parameter(\"settings\", 
\"\", \"rabi_oscillations_pi_pulse_min_voltage\"))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_duration\", int(pi_pulse_duration))\n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\", float(pi_pulse_amplitude)) \n # utils.save_config_parameter(\"settings\", \"\", \"pi_pulse_gain\", float(pi_pulse_gain))\n # utils.save_config_parameter(\"settings\", \"\", \"rabi_oscillations_pi_pulse_min_voltage\", float(rabi_oscillations_pi_pulse_min_voltage))\n\n # #run calibration_qubit_states\n all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()\n # #TODO: save in runcard mean_gnd_states and mean_exc_states\n print(all_gnd_states)\n print(mean_gnd_states)\n print(all_exc_states)\n print(mean_exc_states)\n\n # #TODO: Remove plot qubit states results when tested\n utils.plot_qubit_states(all_gnd_states, all_exc_states)\n\n #TODO: Remove 0 and 1 classification from auto calibration when tested\n #Classify all points into 0 and 1\n classified_gnd_results = []\n for point in all_gnd_states: \n classified_gnd_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))\n\n classified_exc_results = []\n for point in all_exc_states:\n classified_exc_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))\n\n print(classified_gnd_results)\n print(classified_exc_results)\n\n# help classes\nclass QCPulseLengthParameter():\n\n label = 'Qubit Control Pulse Length'\n unit = 'ns'\n name = 'qc_pulse_length'\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.qc_pulse = qc_pulse\n\n def set(self, value):\n self.qc_pulse.duration = value\n self.ro_pulse.start = value + 4\n\nclass T1WaitParameter():\n label = 'Time'\n unit = 'ns'\n name = 't1_wait'\n initial_value = 0\n\n def __init__(self, ro_pulse, qc_pulse):\n self.ro_pulse = ro_pulse\n self.base_duration = qc_pulse.duration\n\n def set(self, value):\n # TODO: implement following condition\n #must be >= 4ns <= 65535\n #platform.delay_before_readout = value", "type": "common" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings 
= ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n 
mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? 
It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses ", "type": "common" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = 
ReadoutPulse(**ro_pulse_settings, shape=ro_pulse_shape)
        sequence = PulseSequence()
        sequence.add(qc_pulse)
        sequence.add(ro_pulse)

        ds = self.load_settings()
        self.pl.tuids_max_num(ds['max_num_plots'])
        software_averages = ds['software_averages']
        ds = ds['resonator_spectroscopy']
        lowres_width = ds['lowres_width']
        lowres_step = ds['lowres_step']
        highres_width = ds['highres_width']
        highres_step = ds['highres_step']
        precision_width = ds['precision_width']
        precision_step = ds['precision_step']

        # Fast Sweep
        scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)
        mc.settables(platform.LO_qrm.device.frequency)
        mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        platform.LO_qcm.off()
        dataset = mc.run("Resonator Spectroscopy Fast", soft_avg=1)
        platform.stop()
        platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])
        avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width // lowres_step)]) * 1e6

        # Precision Sweep
        scanrange = np.arange(-precision_width, precision_width, precision_step)
        mc.settables(platform.LO_qrm.device.frequency)
        mc.setpoints(scanrange + platform.LO_qrm.get_frequency())
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        platform.LO_qcm.off()
        dataset = mc.run("Resonator Spectroscopy Precision", soft_avg=software_averages)
        platform.stop()

        # Fitting
        smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)
        # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency
        max_ro_voltage = smooth_dataset.max() * 1e6

        f0, BW, Q = fitting.lorentzian_fit("last", max, "Resonator_spectroscopy")
        resonator_freq = f0 * 1e9 + ro_pulse.frequency

        print(f"\nResonator Frequency = {resonator_freq}")
        return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset

    def run_qubit_spectroscopy(self):
        platform = self.platform
        platform.reload_settings()
        mc = self.mc

        ps = platform.settings['settings']
        qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])
        qc_pulse_settings = ps['qc_spectroscopy_pulse']
        qc_pulse = Pulse(**qc_pulse_settings, shape=qc_pulse_shape)
        ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])
        ro_pulse_settings = ps['readout_pulse']
        ro_pulse = ReadoutPulse(**ro_pulse_settings, shape=ro_pulse_shape)
        sequence = PulseSequence()
        sequence.add(qc_pulse)
        sequence.add(ro_pulse)

        ds = self.load_settings()
        self.pl.tuids_max_num(ds['max_num_plots'])
        software_averages = ds['software_averages']
        ds = ds['qubit_spectroscopy']
        fast_start = ds['fast_start']
        fast_end = ds['fast_end']
        fast_step = ds['fast_step']
        precision_start = ds['precision_start']
        precision_end = ds['precision_end']
        precision_step = ds['precision_step']

        # Fast Sweep
        fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)
        mc.settables(platform.LO_qcm.device.frequency)
        mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        dataset = mc.run("Qubit Spectroscopy Fast", soft_avg=1)
        platform.stop()

        # Precision Sweep
        platform.software_averages = 1
        precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)
        mc.settables(platform.LO_qcm.device.frequency)
        mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        dataset = mc.run("Qubit Spectroscopy Precision", soft_avg=software_averages)
        platform.stop()

        # Fitting
        smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)
        qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency
        min_ro_voltage = smooth_dataset.min() * 1e6

        print(f"\nQubit Frequency = {qubit_freq}")
        utils.plot(smooth_dataset, dataset, "Qubit_Spectroscopy", 1)
        print("Qubit freq obtained from MC results: ", qubit_freq)
        f0, BW, Q = fitting.lorentzian_fit("last", min, "Qubit_Spectroscopy")
        qubit_freq = f0 * 1e9 - qc_pulse.frequency
        print("Qubit freq obtained from fitting: ", qubit_freq)
        return qubit_freq, min_ro_voltage, smooth_dataset, dataset

    def run_rabi_pulse_length(self):
        platform = self.platform
        platform.reload_settings()
        mc = self.mc

        ps = platform.settings['settings']
        qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])
        qc_pulse_settings = ps['qc_spectroscopy_pulse']
        qc_pulse = Pulse(**qc_pulse_settings, shape=qc_pulse_shape)
        ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])
        ro_pulse_settings = ps['readout_pulse']
        ro_pulse = ReadoutPulse(**ro_pulse_settings, shape=ro_pulse_shape)
        sequence = PulseSequence()
        sequence.add(qc_pulse)
        sequence.add(ro_pulse)

        ds = self.load_settings()
        self.pl.tuids_max_num(ds['max_num_plots'])
        software_averages = ds['software_averages']
        ds = ds['rabi_pulse_length']
        pulse_duration_start = ds['pulse_duration_start']
        pulse_duration_end = ds['pulse_duration_end']
        pulse_duration_step = ds['pulse_duration_step']

        mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))
        mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        dataset = mc.run('Rabi Pulse Length', soft_avg=software_averages)
        platform.stop()

        # Fitting
        pi_pulse_amplitude = qc_pulse.amplitude
        smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)
        pi_pulse_gain = platform.qcm.gain
        utils.plot(smooth_dataset, dataset, "Rabi_pulse_length", 1)

        print(f"\nPi pulse duration = {pi_pulse_duration}")
        print(f"\nPi pulse amplitude = {pi_pulse_amplitude}")  # Check if the returned value from fitting is correct.
        print(f"\nPi pulse gain = {pi_pulse_gain}")  # Needed? It is equal to the QCM gain when performing a Rabi.
        print(f"\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}")
        print(f"\nT1 = {t1}")

        return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1

    # T1: RX(pi) - wait t(rotates z) - readout
    def run_t1(self):
        platform = self.platform
        platform.reload_settings()
        mc = self.mc

        ps = platform.settings['settings']
        start = 0
        frequency = ps['pi_pulse_frequency']
        amplitude = ps['pi_pulse_amplitude']
        duration = ps['pi_pulse_duration']
        phase = 0
        shape = eval(ps['pi_pulse_shape'])
        qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)

        ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])
        ro_pulse_settings = ps['readout_pulse']
        ro_pulse = ReadoutPulse(**ro_pulse_settings, shape=ro_pulse_shape)
        sequence = PulseSequence()
        sequence.add(qc_pi_pulse)
        sequence.add(ro_pulse)

        ds = self.load_settings()
        self.pl.tuids_max_num(ds['max_num_plots'])
        software_averages = ds['software_averages']
        ds = ds['t1']
        delay_before_readout_start = ds['delay_before_readout_start']
        delay_before_readout_end = ds['delay_before_readout_end']
        delay_before_readout_step = ds['delay_before_readout_step']

        mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))
        mc.setpoints(np.arange(delay_before_readout_start,
                               delay_before_readout_end,
                               delay_before_readout_step))
        mc.gettables(Gettable(ROController(platform, sequence)))
        platform.start()
        dataset = mc.run('T1', soft_avg=software_averages)
        platform.stop()

        # Fitting
        smooth_dataset, t1 = fitting.t1_fit(dataset)
        utils.plot(smooth_dataset, dataset, "t1", 1)
        print(f'\nT1 = {t1}')

        return t1, smooth_dataset, dataset

    def callibrate_qubit_states(self):
        platform = self.platform
        platform.reload_settings()
        ps = platform.settings['settings']
        niter = 10
        nshots = 1

        # create exc and gnd pulses
        start = 0
        frequency = ps['pi_pulse_frequency']
        amplitude = ps['pi_pulse_amplitude']
        duration = ps['pi_pulse_duration']
        phase = 0
        shape = eval(ps['pi_pulse_shape'])
        qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)

        ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])
        ro_pulse_settings = ps['readout_pulse']
        ro_pulse = ReadoutPulse(**ro_pulse_settings, shape=ro_pulse_shape)

        exc_sequence = PulseSequence()
        exc_sequence.add(qc_pi_pulse)
        exc_sequence.add(ro_pulse)

        gnd_sequence = PulseSequence()
        # ro_pulse.start=0
        gnd_sequence.add(ro_pulse)

        platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)
        platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)
        platform.start()

        # Execute niter single gnd shots
        platform.LO_qcm.off()
        all_gnd_states = []
        for i in range(niter):
            qubit_state = platform.execute(gnd_sequence, nshots)
            # Compose complex point from i, q obtained from execution
            point = complex(qubit_state[2], qubit_state[3])
            all_gnd_states.append(point)

        # Execute niter single exc shots
        platform.LO_qcm.on()
        all_exc_states = []
        for i in range(niter):
            qubit_state = platform.execute(exc_sequence, nshots)
            # Compose complex point from i, q obtained from execution
            point = complex(qubit_state[2], qubit_state[3])
            all_exc_states.append(point)

        platform.stop()

        return all_gnd_states, np.mean(all_gnd_states), all_exc_states, np.mean(all_exc_states)

    def auto_calibrate_plaform(self):
        platform = self.platform

        # backup latest platform runcard
        utils.backup_config_file(platform)

        # run and save cavity spectroscopy calibration
        resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()

        print(utils.get_config_parameter("settings", "", "resonator_freq"))
        print(utils.get_config_parameter("settings", "", "resonator_spectroscopy_avg_min_ro_voltage"))
        print(utils.get_config_parameter("settings", "", "resonator_spectroscopy_max_ro_voltage"))
        print(utils.get_config_parameter("LO_QRM_settings", "", "frequency"))
        # utils.save_config_parameter("settings", "", "resonator_freq", float(resonator_freq))
        # utils.save_config_parameter("settings", "", "resonator_spectroscopy_avg_min_ro_voltage", float(avg_min_voltage))
        # utils.save_config_parameter("settings", "", "resonator_spectroscopy_max_ro_voltage", float(max_ro_voltage))
        # utils.save_config_parameter("LO_QRM_settings", "", "frequency", float(resonator_freq - 20_000_000))

        # run and save qubit spectroscopy calibration
        qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()
        print(utils.get_config_parameter("settings", "", "qubit_freq"))
        print(utils.get_config_parameter("LO_QCM_settings", "", "frequency"))
        print(utils.get_config_parameter("settings", "", "qubit_spectroscopy_min_ro_voltage"))
        # utils.save_config_parameter("settings", "", "qubit_freq", float(qubit_freq))
        # utils.save_config_parameter("LO_QCM_settings", "", "frequency", float(qubit_freq + 200_000_000))
        # utils.save_config_parameter("settings", "", "qubit_spectroscopy_min_ro_voltage", float(min_ro_voltage))

        # run Rabi and save Pi pulse params from calibration
        dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()
        print(utils.get_config_parameter("settings", "", "pi_pulse_duration"))
        print(utils.get_config_parameter("settings", "", "pi_pulse_amplitude"))
        print(utils.get_config_parameter("settings", "", "pi_pulse_gain"))
        print(utils.get_config_parameter("settings", "", "rabi_oscillations_pi_pulse_min_voltage"))
        # utils.save_config_parameter("settings", "", "pi_pulse_duration", int(pi_pulse_duration))
        # utils.save_config_parameter("settings", "", "pi_pulse_amplitude", float(pi_pulse_amplitude))
        # utils.save_config_parameter("settings", "", "pi_pulse_gain", float(pi_pulse_gain))
        # utils.save_config_parameter("settings", "", "rabi_oscillations_pi_pulse_min_voltage", float(rabi_oscillations_pi_pulse_min_voltage))

        # run calibration_qubit_states
        all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()
        # TODO: save in runcard mean_gnd_states and mean_exc_states
        print(all_gnd_states)
        print(mean_gnd_states)
        print(all_exc_states)
        print(mean_exc_states)

        # TODO: Remove plot qubit states results when tested
        utils.plot_qubit_states(all_gnd_states, all_exc_states)

        # TODO: Remove 0 and 1 classification from auto calibration when tested
        # Classify all points into 0 and 1
        classified_gnd_results = []
        for point in all_gnd_states:
            classified_gnd_results.append(utils.classify(point, mean_gnd_states, mean_exc_states))

        classified_exc_results = []
        for point in all_exc_states:
            classified_exc_results.append(utils.classify(point, mean_gnd_states, mean_exc_states))

        print(classified_gnd_results)
        print(classified_exc_results)


# helper classes
class QCPulseLengthParameter():

    label = 'Qubit Control Pulse Length'
    unit = 'ns'
    name = 'qc_pulse_length'

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.qc_pulse = qc_pulse

    def set(self, value):
        self.qc_pulse.duration = value
        self.ro_pulse.start = value + 4


class T1WaitParameter():
    label = 'Time'
    unit = 'ns'
    name = 't1_wait'
    initial_value = 0

    def __init__(self, ro_pulse, qc_pulse):
        self.ro_pulse = ro_pulse
        self.base_duration = qc_pulse.duration

    def set(self, value):
        # TODO: implement following condition
        # must be >= 4ns <= 65535
        # platform.delay_before_readout = value
        self.ro_pulse.start = self.base_duration + 4 + value


class ROController():
    # Quantify Gettable Interface Implementation
    label = ['Amplitude', 'Phase', 'I', 'Q']
    unit = ['V', 'Radians', 'V', 'V']
    name = ['A', 'Phi', 'I', 'Q']

    def __init__(self, platform, sequence):
        self.platform = platform
        self.sequence = sequence

    def get(self):
        # Body truncated in the source dump; assumed to execute the sequence
        # and return the (amplitude, phase, i, q) acquisition results.
        return self.platform.execute(self.sequence)
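# --- Illustration: utils.variable_resolution_scanrange ----------------------
# The helper is defined in utils and not shown in this file. A minimal sketch,
# assuming it stitches a coarse scan on the wings to a fine scan around the
# centre (the real implementation may differ):
def _variable_resolution_scanrange_sketch(lowres_width, lowres_step, highres_width, highres_step):
    # coarse left wing + fine centre + coarse right wing
    return np.concatenate((
        np.arange(-lowres_width, -highres_width, lowres_step),
        np.arange(-highres_width, highres_width, highres_step),
        np.arange(highres_width, lowres_width, lowres_step),
    ))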
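# --- Illustration: utils.classify --------------------------------------------
# utils.classify(point, mean_gnd, mean_exc) is called above but defined
# elsewhere. A minimal nearest-mean sketch consistent with that call signature
# (an assumption; the real helper may differ):
def _classify_sketch(point, mean_gnd_state, mean_exc_state):
    """Return 0 if the IQ point is closer to the ground-state mean, else 1."""
    return 0 if abs(point - mean_gnd_state) <= abs(point - mean_exc_state) else 1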
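# --- Illustration: driving the Calibration class -----------------------------
# Minimal usage sketch. The runcard name "tiiq" is an assumption; adjust it to
# the qibolab platform available in your setup. calibration.yml must be
# reachable from the working directory for load_settings() to succeed.
if __name__ == "__main__":
    platform = Platform("tiiq")  # assumed runcard name
    calibration = Calibration(platform)
    resonator_freq, *_ = calibration.run_resonator_spectroscopy()
    qubit_freq, *_ = calibration.run_qubit_spectroscopy()
    # calibration.auto_calibrate_plaform()  # or run the full pipeline end to end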
self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']", "type": "random" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator 
Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1\n precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 11, 2)\n qubit_freq = dataset['x0'].values[smooth_dataset.argmin()] - qc_pulse.frequency\n min_ro_voltage = smooth_dataset.min() * 1e6\n\n print(f\"\\nQubit Frequency = {qubit_freq}\")\n utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)\n print(\"Qubit freq ontained from MC results: \", qubit_freq)\n f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")\n qubit_freq = (f0*1e9 - qc_pulse.frequency)\n print(\"Qubit freq ontained from fitting: \", qubit_freq)\n return qubit_freq, min_ro_voltage, smooth_dataset, dataset\n\n def run_rabi_pulse_length(self):\n platform = 
self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['rabi_pulse_length']\n pulse_duration_start = ds['pulse_duration_start']\n pulse_duration_end = ds['pulse_duration_end']\n pulse_duration_step = ds['pulse_duration_step']\n\n\n mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))\n mc.setpoints(np.arange(pulse_duration_start, pulse_duration_end, pulse_duration_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)\n platform.stop()\n \n # Fitting\n pi_pulse_amplitude = qc_pulse.amplitude\n smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)\n pi_pulse_gain = platform.qcm.gain\n utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)\n\n print(f\"\\nPi pulse duration = {pi_pulse_duration}\")\n print(f\"\\nPi pulse amplitude = {pi_pulse_amplitude}\") #Check if the returned value from fitting is correct.\n print(f\"\\nPi pulse gain = {pi_pulse_gain}\") #Needed? It is equal to the QCM gain when performing a Rabi.\n print(f\"\\nrabi oscillation min voltage = {rabi_oscillations_pi_pulse_min_voltage}\")\n print(f\"\\nT1 = {t1}\")\n\n return dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1\n\n # T1: RX(pi) - wait t(rotates z) - readout\n def run_t1(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pi_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['t1']\n delay_before_readout_start = ds['delay_before_readout_start']\n delay_before_readout_end = ds['delay_before_readout_end']\n delay_before_readout_step = ds['delay_before_readout_step']\n\n\n mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))\n mc.setpoints(np.arange(delay_before_readout_start,\n delay_before_readout_end,\n delay_before_readout_step))\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start()\n dataset = mc.run('T1', soft_avg = software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset, t1 = fitting.t1_fit(dataset)\n utils.plot(smooth_dataset, dataset, \"t1\", 1)\n print(f'\\nT1 = {t1}')\n\n return t1, smooth_dataset, dataset\n\n def callibrate_qubit_states(self):\n \n platform = self.platform\n platform.reload_settings()\n ps = 
platform.settings['settings']\n niter=10\n nshots=1\n\n #create exc and gnd pulses \n start = 0\n frequency = ps['pi_pulse_frequency']\n amplitude = ps['pi_pulse_amplitude']\n duration = ps['pi_pulse_duration']\n phase = 0\n shape = eval(ps['pi_pulse_shape'])\n qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)\n\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n \n exc_sequence = PulseSequence()\n exc_sequence.add(qc_pi_pulse)\n gnd_sequence.add(ro_pulse)\n\n gnd_sequence = PulseSequence()\n #ro_pulse.start=0\n gnd_sequence.add(ro_pulse)\n\n platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)\n platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)\n platform.start()\n\n #Exectue niter single gnd shots\n platform.LO_qcm.off()\n all_gnd_states = []\n for i in range(niter):\n qubit_state = platform.execute(gnd_sequence, nshots)\n #Compose complex point from i, q obtained from execution\n point = complex(qubit_state[2], qubit_state[3])\n all_gnd_states.add(point)\n\n #Exectue niter single exc shots\n platform.LO_qcm.on()\n all_exc_states = []\n for i in range(niter):\n qubit_state = platform.execute(exc_sequence, nshots)\n #Compose complex point from i, q obtained from execution", "type": "random" }, { "content": "import pathlib\nimport numpy as np\n#import matplotlib.pyplot as plt\nimport utils\nimport yaml\nimport fitting\nfrom qibolab import Platform\n\n# TODO: Have a look in the documentation of ``MeasurementControl``\n#from quantify_core.measurement import MeasurementControl\nfrom quantify_core.measurement.control import Gettable, Settable\nfrom quantify_core.data.handling import set_datadir\nfrom scipy.signal import savgol_filter\nfrom qibolab.pulses import Pulse, ReadoutPulse\nfrom qibolab.circuit import PulseSequence\nfrom qibolab.pulse_shapes import Rectangular, Gaussian\n\n\n# TODO: Check why this set_datadir is needed\n#set_datadir(pathlib.Path(\"data\") / \"quantify\")\nset_datadir(pathlib.Path(__file__).parent / \"data\" / \"quantify\")\n\nclass Calibration():\n\n def __init__(self, platform: Platform):\n self.platform = platform\n self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')\n\n def load_settings(self):\n # Load diagnostics settings\n with open(\"calibration.yml\", \"r\") as file:\n return yaml.safe_load(file)\n\n def run_resonator_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['resonator_spectroscopy']\n lowres_width = ds['lowres_width']\n lowres_step = ds['lowres_step']\n highres_width = ds['highres_width']\n highres_step = ds['highres_step']\n precision_width = ds['precision_width']\n precision_step = ds['precision_step']\n\n #Fast Sweep\n scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, 
highres_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])\n avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6\n\n # Precision Sweep\n scanrange = np.arange(-precision_width, precision_width, precision_step)\n mc.settables(platform.LO_qrm.device.frequency)\n mc.setpoints(scanrange + platform.LO_qrm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n platform.LO_qcm.off()\n dataset = mc.run(\"Resonator Spectroscopy Precision\", soft_avg=software_averages)\n platform.stop()\n\n # Fitting\n smooth_dataset = savgol_filter(dataset['y0'].values, 25, 2)\n # resonator_freq = dataset['x0'].values[smooth_dataset.argmax()] + ro_pulse.frequency\n max_ro_voltage = smooth_dataset.max() * 1e6\n\n f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")\n resonator_freq = (f0*1e9 + ro_pulse.frequency)\n\n print(f\"\\nResonator Frequency = {resonator_freq}\")\n return resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset\n\n def run_qubit_spectroscopy(self):\n platform = self.platform\n platform.reload_settings()\n mc = self.mc\n \n ps = platform.settings['settings']\n qc_pulse_shape = eval(ps['qc_spectroscopy_pulse'].popitem()[1])\n qc_pulse_settings = ps['qc_spectroscopy_pulse']\n qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)\n ro_pulse_shape = eval(ps['readout_pulse'].popitem()[1])\n ro_pulse_settings = ps['readout_pulse']\n ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)\n sequence = PulseSequence()\n sequence.add(qc_pulse)\n sequence.add(ro_pulse)\n\n ds = self.load_settings()\n self.pl.tuids_max_num(ds['max_num_plots'])\n software_averages = ds['software_averages']\n ds = ds['qubit_spectroscopy']\n fast_start = ds['fast_start']\n fast_end = ds['fast_end']\n fast_step = ds['fast_step']\n precision_start = ds['precision_start']\n precision_end = ds['precision_end']\n precision_step = ds['precision_step']\n \n # Fast Sweep\n fast_sweep_scan_range = np.arange(fast_start, fast_end, fast_step)\n mc.settables(platform.LO_qcm.device.frequency)\n mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())\n mc.gettables(Gettable(ROController(platform, sequence)))\n platform.start() \n dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)\n platform.stop()\n \n # Precision Sweep\n platform.software_averages = 1", "type": "random" } ]
[ " mc.gettables(Gettable(ROController(platform, sequence)))", " mc.settables(Settable(T1WaitParameter(ro_pulse, qc_pi_pulse)))", " ds = self.load_settings()", " mc.settables(Settable(QCPulseLengthParameter(ro_pulse, qc_pulse)))", " resonator_freq, avg_min_voltage, max_ro_voltage, smooth_dataset, dataset = self.run_resonator_spectroscopy()", " all_gnd_states, mean_gnd_states, all_exc_states, mean_exc_states = self.callibrate_qubit_states()", " qubit_freq, min_ro_voltage, smooth_dataset, dataset = self.run_qubit_spectroscopy()", " platform.LO_qcm.set_frequency(ps['qubit_freq'] + qc_pi_pulse.frequency)", " scanrange = utils.variable_resolution_scanrange(lowres_width, lowres_step, highres_width, highres_step)", " f0, BW, Q = fitting.lorentzian_fit(\"last\", max, \"Resonator_spectroscopy\")", " sequence.add(qc_pi_pulse)", " f0, BW, Q = fitting.lorentzian_fit(\"last\", min, \"Qubit_Spectroscopy\")", " sequence = PulseSequence()", " smooth_dataset, pi_pulse_duration, rabi_oscillations_pi_pulse_min_voltage, t1 = fitting.rabi_fit(dataset)", " duration = ps['pi_pulse_duration']", " gnd_sequence = PulseSequence()", " dataset, pi_pulse_duration, pi_pulse_amplitude, pi_pulse_gain, rabi_oscillations_pi_pulse_min_voltage, t1 = self.run_rabi_pulse_length()", " mc.settables(platform.LO_qrm.device.frequency)", " platform.LO_qcm.on()", " software_averages = ds['software_averages']", " classified_exc_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))", " exc_sequence.add(qc_pi_pulse)", " sequence.add(qc_pulse)", " utils.plot(smooth_dataset, dataset, \"Qubit_Spectroscopy\", 1)", " platform.LO_qrm.set_frequency(dataset['x0'].values[dataset['y0'].argmax().values])", " dataset = mc.run('Rabi Pulse Length', soft_avg = software_averages)", " utils.backup_config_file(platform)", " utils.plot(smooth_dataset, dataset, \"Rabi_pulse_length\", 1)", " ps = platform.settings['settings']", " classified_gnd_results.add(utils.classify(point, mean_gnd_states, mean_exc_states))", " dataset = mc.run(\"Qubit Spectroscopy Precision\", soft_avg=software_averages)", " platform.software_averages = 1", " platform.LO_qcm.off()", " gnd_sequence.add(ro_pulse)", " platform.reload_settings()", " platform.LO_qrm.set_frequency(ps['resonator_freq'] - ro_pulse.frequency)", " utils.plot_qubit_states(all_gnd_states, all_exc_states)", " all_gnd_states.add(point)", " platform.stop()", " exc_sequence = PulseSequence()", " pi_pulse_amplitude = qc_pulse.amplitude", " mc.setpoints(fast_sweep_scan_range + platform.LO_qcm.get_frequency())", " mc.setpoints(precision_sweep_scan_range + platform.LO_qcm.get_frequency())", " qc_pi_pulse = Pulse(start, duration, amplitude, frequency, phase, shape)", " shape = eval(ps['pi_pulse_shape'])", " qc_pulse = Pulse(**qc_pulse_settings, shape = qc_pulse_shape)", " smooth_dataset, t1 = fitting.t1_fit(dataset)", " pi_pulse_gain = platform.qcm.gain", " self.base_duration = qc_pulse.duration", " self.mc, self.pl, self.ins = utils.create_measurement_control('Calibration')", " utils.plot(smooth_dataset, dataset, \"t1\", 1)", " ro_pulse = ReadoutPulse(**ro_pulse_settings, shape = ro_pulse_shape)", " mc.setpoints(scanrange + platform.LO_qrm.get_frequency())", " dataset = mc.run('T1', soft_avg = software_averages)", " platform.start() ", " dataset = mc.run(\"Resonator Spectroscopy Fast\", soft_avg=1)", " qubit_state = platform.execute(exc_sequence, nshots)", " dataset = mc.run(\"Qubit Spectroscopy Fast\", soft_avg=1)", " self.ro_pulse.start = self.base_duration + 4 + value", " start = 0", " return 
self.platform.execute(self.sequence)", " self.ro_pulse.start = value + 4", " qubit_state = platform.execute(gnd_sequence, nshots)", "from qibolab import Platform", "", " print(utils.get_config_parameter(\"settings\", \"\", \"pi_pulse_amplitude\"))", "#set_datadir(pathlib.Path(\"data\") / \"quantify\")", " avg_min_voltage = np.mean(dataset['y0'].values[:(lowres_width//lowres_step)]) * 1e6", " mc = self.mc", " precision_step = ds['precision_step']", " point = complex(qubit_state[2], qubit_state[3])", " precision_sweep_scan_range = np.arange(precision_start, precision_end, precision_step)" ]
METASEP